/* drivers/i2c/chips/lis3dh.c - LIS3DH motion sensor driver
*
*
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/earlysuspend.h>
#include <linux/platform_device.h>
#include <asm/atomic.h>
#include <cust_acc.h>
#include <linux/hwmsensor.h>
#include <linux/hwmsen_dev.h>
#include <linux/sensors_io.h>
#include "lis3dh.h"
#include <linux/hwmsen_helper.h>
#include <mach/mt_devs.h>
#include <mach/mt_typedefs.h>
#include <mach/mt_gpio.h>
#include <mach/mt_pm_ldo.h>
#define POWER_NONE_MACRO MT65XX_POWER_NONE
/*----------------------------------------------------------------------------*/
//#define I2C_DRIVERID_LIS3DH 345
/*----------------------------------------------------------------------------*/
#define DEBUG 1
/*----------------------------------------------------------------------------*/
#define CONFIG_LIS3DH_LOWPASS /*apply low pass filter on output*/
/*----------------------------------------------------------------------------*/
#define LIS3DH_AXIS_X 0
#define LIS3DH_AXIS_Y 1
#define LIS3DH_AXIS_Z 2
#define LIS3DH_AXES_NUM 3
#define LIS3DH_DATA_LEN 6
#define LIS3DH_DEV_NAME "LIS3DH"
/*----------------------------------------------------------------------------*/
static const struct i2c_device_id lis3dh_i2c_id[] = {{LIS3DH_DEV_NAME,0},{}};
/*the adapter id will be available in customization*/
static struct i2c_board_info __initdata i2c_LIS3DH={ I2C_BOARD_INFO("LIS3DH", (0x32>>1))};
//static unsigned short lis3dh_force[] = {0x00, LIS3DH_I2C_SLAVE_ADDR, I2C_CLIENT_END, I2C_CLIENT_END};
//static const unsigned short *const lis3dh_forces[] = { lis3dh_force, NULL };
//static struct i2c_client_address_data lis3dh_addr_data = { .forces = lis3dh_forces,};
/*----------------------------------------------------------------------------*/
static int lis3dh_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
static int lis3dh_i2c_remove(struct i2c_client *client);
#if !defined(CONFIG_HAS_EARLYSUSPEND)
static int lis3dh_suspend(struct i2c_client *client, pm_message_t msg);
static int lis3dh_resume(struct i2c_client *client);
#endif
//static int lis3dh_i2c_detect(struct i2c_client *client, int kind, struct i2c_board_info *info);
static int lis3dh_local_init(void);
static int lis3dh_remove(void);
static int lis3dh_init_flag =0; // 0<==>OK -1 <==> fail
/*----------------------------------------------------------------------------*/
typedef enum {
ADX_TRC_FILTER = 0x01,
ADX_TRC_RAWDATA = 0x02,
ADX_TRC_IOCTL = 0x04,
ADX_TRC_CALI = 0X08,
ADX_TRC_INFO = 0X10,
} ADX_TRC;
/*----------------------------------------------------------------------------*/
struct scale_factor{
u8 whole;
u8 fraction;
};
/*----------------------------------------------------------------------------*/
struct data_resolution {
struct scale_factor scalefactor;
int sensitivity;
};
/*----------------------------------------------------------------------------*/
#define C_MAX_FIR_LENGTH (32)
/*----------------------------------------------------------------------------*/
struct data_filter {
s16 raw[C_MAX_FIR_LENGTH][LIS3DH_AXES_NUM];
int sum[LIS3DH_AXES_NUM];
int num;
int idx;
};
/*----------------------------------------------------------------------------*/
static struct sensor_init_info lis3dh_init_info = {
.name = "lis3dh",
.init = lis3dh_local_init,
.uninit = lis3dh_remove,
};
/*----------------------------------------------------------------------------*/
struct lis3dh_i2c_data {
struct i2c_client *client;
struct acc_hw *hw;
struct hwmsen_convert cvt;
/*misc*/
struct data_resolution *reso;
atomic_t trace;
atomic_t suspend;
atomic_t selftest;
atomic_t filter;
s16 cali_sw[LIS3DH_AXES_NUM+1];
/*data*/
s8 offset[LIS3DH_AXES_NUM+1]; /*+1: for 4-byte alignment*/
s16 data[LIS3DH_AXES_NUM+1];
#if defined(CONFIG_LIS3DH_LOWPASS)
atomic_t firlen;
atomic_t fir_en;
struct data_filter fir;
#endif
/*early suspend*/
#if defined(CONFIG_HAS_EARLYSUSPEND)
struct early_suspend early_drv;
#endif
};
/*----------------------------------------------------------------------------*/
static struct i2c_driver lis3dh_i2c_driver = {
.driver = {
// .owner = THIS_MODULE,
.name = LIS3DH_DEV_NAME,
},
.probe = lis3dh_i2c_probe,
.remove = lis3dh_i2c_remove,
// .detect = lis3dh_i2c_detect,
#if !defined(CONFIG_HAS_EARLYSUSPEND)
.suspend = lis3dh_suspend,
.resume = lis3dh_resume,
#endif
.id_table = lis3dh_i2c_id,
// .address_data = &lis3dh_addr_data,
};
/*----------------------------------------------------------------------------*/
static struct i2c_client *lis3dh_i2c_client = NULL;
//static struct platform_driver lis3dh_gsensor_driver;
static struct lis3dh_i2c_data *obj_i2c_data = NULL;
static bool sensor_power = false;
static GSENSOR_VECTOR3D gsensor_gain, gsensor_offset;
//static char selftestRes[10] = {0};
/*----------------------------------------------------------------------------*/
#define GSE_TAG "[Gsensor] "
#define GSE_FUN(f) printk(KERN_INFO GSE_TAG"%s\n", __FUNCTION__)
#define GSE_ERR(fmt, args...) printk(KERN_ERR GSE_TAG"%s %d : "fmt, __FUNCTION__, __LINE__, ##args)
#define GSE_LOG(fmt, args...) printk(KERN_INFO GSE_TAG fmt, ##args)
/*----------------------------------------------------------------------------*/
static struct data_resolution lis3dh_data_resolution[] = {
/* combination by {FULL_RES,RANGE}*/
{{ 1, 0}, 1024}, // dataformat +/-2g in 12-bit resolution; { 1, 0} = 1.0 = (2*2*1000)/(2^12); 1024 = (2^12)/(2*2)
{{ 1, 9}, 512}, // dataformat +/-4g in 12-bit resolution; { 1, 9} = 1.9 = (2*4*1000)/(2^12); 512 = (2^12)/(2*4)
{{ 3, 9}, 256}, // dataformat +/-8g in 12-bit resolution; { 3, 9} = 3.9 = (2*8*1000)/(2^12); 256 = (2^12)/(2*8)
};
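/* Worked example for the table above: at +/-2g the 12-bit output spans 4096
 * counts over 4 g, so sensitivity = 4096/4 = 1024 LSB/g and the step size is
 * (2*2*1000)/4096 ~= 0.98 mg/LSB, stored (rounded) as {1,0}. The +/-4g and
 * +/-8g rows follow the same formula. */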
/*----------------------------------------------------------------------------*/
static struct data_resolution lis3dh_offset_resolution = {{15, 6}, 64};
/*
static int hwmsen_read_byte_sr(struct i2c_client *client, u8 addr, u8 *data)
{
u8 buf;
int ret = 0;
client->addr = client->addr& I2C_MASK_FLAG | I2C_WR_FLAG |I2C_RS_FLAG;
buf = addr;
ret = i2c_master_send(client, (const char*)&buf, 1<<8 | 1);
//ret = i2c_master_send(client, (const char*)&buf, 1);
if (ret < 0) {
GSE_ERR("send command error!!\n");
return -EFAULT;
}
*data = buf;
client->addr = client->addr& I2C_MASK_FLAG;
return 0;
}
*/
static void dumpReg(struct i2c_client *client)
{
int i=0;
u8 addr = 0x20;
u8 regdata=0;
for(i=0; i<3 ; i++)
{
//dump all
hwmsen_read_byte(client,addr,&regdata);
GSE_LOG("Reg addr=%x regdata=%x\n",addr,regdata);
addr++;
}
}
/*--------------------ADXL power control function----------------------------------*/
static void LIS3DH_power(struct acc_hw *hw, unsigned int on)
{
static unsigned int power_on = 0;
if(hw->power_id != POWER_NONE_MACRO) // have externel LDO
{
GSE_LOG("power %s\n", on ? "on" : "off");
if(power_on == on) // power status not change
{
GSE_LOG("ignore power control: %d\n", on);
}
else if(on) // power on
{
if(!hwPowerOn(hw->power_id, hw->power_vol, "LIS3DH"))
{
GSE_ERR("power on fails!!\n");
}
}
else // power off
{
if (!hwPowerDown(hw->power_id, "LIS3DH"))
{
GSE_ERR("power off fail!!\n");
}
}
}
power_on = on;
}
/*----------------------------------------------------------------------------*/
static int LIS3DH_SetDataResolution(struct lis3dh_i2c_data *obj)
{
int err = 0;
u8 dat, reso;
err = hwmsen_read_byte(obj->client, LIS3DH_REG_CTL_REG4, &dat);
if(err)
{
GSE_ERR("write data format fail!!\n");
return err;
}
/*the data_reso is combined by 3 bits: {FULL_RES, DATA_RANGE}*/
reso = (dat & 0x30)>>4;
if(reso >= 0x3)
reso = 0x2;
if(reso < sizeof(lis3dh_data_resolution)/sizeof(lis3dh_data_resolution[0]))
{
obj->reso = &lis3dh_data_resolution[reso];
return 0;
}
else
{
return -EINVAL;
}
}
/*----------------------------------------------------------------------------*/
static int LIS3DH_ReadData(struct i2c_client *client, s16 data[LIS3DH_AXES_NUM])
{
struct lis3dh_i2c_data *priv = i2c_get_clientdata(client);
// u8 addr = LIS3DH_REG_DATAX0;
u8 buf[LIS3DH_DATA_LEN] = {0};
int err = 0;
if(NULL == client)
{
err = -EINVAL;
}
else
{
if(hwmsen_read_block(client, LIS3DH_REG_OUT_X, buf, 0x01))
{
GSE_ERR("read G sensor data register err!\n");
return -1;
}
if(hwmsen_read_block(client, LIS3DH_REG_OUT_X+1, &buf[1], 0x01))
{
GSE_ERR("read G sensor data register err!\n");
return -1;
}
data[LIS3DH_AXIS_X] = (s16)((buf[0]+(buf[1]<<8))>>4);
if(hwmsen_read_block(client, LIS3DH_REG_OUT_Y, &buf[2], 0x01))
{
GSE_ERR("read G sensor data register err!\n");
return -1;
}
if(hwmsen_read_block(client, LIS3DH_REG_OUT_Y+1, &buf[3], 0x01))
{
GSE_ERR("read G sensor data register err!\n");
return -1;
}
data[LIS3DH_AXIS_Y] = (s16)((s16)(buf[2] +( buf[3]<<8))>>4);
if(hwmsen_read_block(client, LIS3DH_REG_OUT_Z, &buf[4], 0x01))
{
GSE_ERR("read G sensor data register err!\n");
return -1;
}
if(hwmsen_read_block(client, LIS3DH_REG_OUT_Z+1, &buf[5], 0x01))
{
GSE_ERR("read G sensor data register err!\n");
return -1;
}
data[LIS3DH_AXIS_Z] =(s16)((buf[4]+(buf[5]<<8))>>4);
//GSE_LOG("[%08X %08X %08X %08x %08x %08x]\n",buf[0],buf[1],buf[2],buf[3],buf[4],buf[5]);
data[LIS3DH_AXIS_X] &= 0xfff;
data[LIS3DH_AXIS_Y] &= 0xfff;
data[LIS3DH_AXIS_Z] &= 0xfff;
if(atomic_read(&priv->trace) & ADX_TRC_RAWDATA)
{
GSE_LOG("[%08X %08X %08X] => [%5d %5d %5d]\n", data[LIS3DH_AXIS_X], data[LIS3DH_AXIS_Y], data[LIS3DH_AXIS_Z],
data[LIS3DH_AXIS_X], data[LIS3DH_AXIS_Y], data[LIS3DH_AXIS_Z]);
}
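/* The 12-bit samples are two's complement: if bit 11 is set the value is
 * negative, so it is inverted, masked back to 12 bits, incremented and then
 * negated. Worked example: raw 0x801 -> (~0x801 & 0xfff) + 1 = 0x7FF = 2047,
 * giving -2047. */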
if(data[LIS3DH_AXIS_X]&0x800)
{
data[LIS3DH_AXIS_X] = ~data[LIS3DH_AXIS_X];
data[LIS3DH_AXIS_X] &= 0xfff;
data[LIS3DH_AXIS_X]+=1;
data[LIS3DH_AXIS_X] = -data[LIS3DH_AXIS_X];
}
if(data[LIS3DH_AXIS_Y]&0x800)
{
data[LIS3DH_AXIS_Y] = ~data[LIS3DH_AXIS_Y];
data[LIS3DH_AXIS_Y] &= 0xfff;
data[LIS3DH_AXIS_Y]+=1;
data[LIS3DH_AXIS_Y] = -data[LIS3DH_AXIS_Y];
}
if(data[LIS3DH_AXIS_Z]&0x800)
{
data[LIS3DH_AXIS_Z] = ~data[LIS3DH_AXIS_Z];
data[LIS3DH_AXIS_Z] &= 0xfff;
data[LIS3DH_AXIS_Z]+=1;
data[LIS3DH_AXIS_Z] = -data[LIS3DH_AXIS_Z];
}
if(atomic_read(&priv->trace) & ADX_TRC_RAWDATA)
{
GSE_LOG("[%08X %08X %08X] => [%5d %5d %5d] after\n", data[LIS3DH_AXIS_X], data[LIS3DH_AXIS_Y], data[LIS3DH_AXIS_Z],
data[LIS3DH_AXIS_X], data[LIS3DH_AXIS_Y], data[LIS3DH_AXIS_Z]);
}
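/* Optional software low-pass: a moving average over the last 'firlen' samples.
 * fir.raw[][] acts as a ring buffer and fir.sum[] holds running totals, so a
 * full buffer costs one subtract and one add per axis before the average
 * sum/firlen replaces the sample; until the buffer fills, raw samples pass
 * through unfiltered while the sum is primed. */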
#ifdef CONFIG_LIS3DH_LOWPASS
if(atomic_read(&priv->filter))
{
if(atomic_read(&priv->fir_en) && !atomic_read(&priv->suspend))
{
int idx, firlen = atomic_read(&priv->firlen);
if(priv->fir.num < firlen)
{
priv->fir.raw[priv->fir.num][LIS3DH_AXIS_X] = data[LIS3DH_AXIS_X];
priv->fir.raw[priv->fir.num][LIS3DH_AXIS_Y] = data[LIS3DH_AXIS_Y];
priv->fir.raw[priv->fir.num][LIS3DH_AXIS_Z] = data[LIS3DH_AXIS_Z];
priv->fir.sum[LIS3DH_AXIS_X] += data[LIS3DH_AXIS_X];
priv->fir.sum[LIS3DH_AXIS_Y] += data[LIS3DH_AXIS_Y];
priv->fir.sum[LIS3DH_AXIS_Z] += data[LIS3DH_AXIS_Z];
if(atomic_read(&priv->trace) & ADX_TRC_FILTER)
{
GSE_LOG("add [%2d] [%5d %5d %5d] => [%5d %5d %5d]\n", priv->fir.num,
priv->fir.raw[priv->fir.num][LIS3DH_AXIS_X], priv->fir.raw[priv->fir.num][LIS3DH_AXIS_Y], priv->fir.raw[priv->fir.num][LIS3DH_AXIS_Z],
priv->fir.sum[LIS3DH_AXIS_X], priv->fir.sum[LIS3DH_AXIS_Y], priv->fir.sum[LIS3DH_AXIS_Z]);
}
priv->fir.num++;
priv->fir.idx++;
}
else
{
idx = priv->fir.idx % firlen;
priv->fir.sum[LIS3DH_AXIS_X] -= priv->fir.raw[idx][LIS3DH_AXIS_X];
priv->fir.sum[LIS3DH_AXIS_Y] -= priv->fir.raw[idx][LIS3DH_AXIS_Y];
priv->fir.sum[LIS3DH_AXIS_Z] -= priv->fir.raw[idx][LIS3DH_AXIS_Z];
priv->fir.raw[idx][LIS3DH_AXIS_X] = data[LIS3DH_AXIS_X];
priv->fir.raw[idx][LIS3DH_AXIS_Y] = data[LIS3DH_AXIS_Y];
priv->fir.raw[idx][LIS3DH_AXIS_Z] = data[LIS3DH_AXIS_Z];
priv->fir.sum[LIS3DH_AXIS_X] += data[LIS3DH_AXIS_X];
priv->fir.sum[LIS3DH_AXIS_Y] += data[LIS3DH_AXIS_Y];
priv->fir.sum[LIS3DH_AXIS_Z] += data[LIS3DH_AXIS_Z];
priv->fir.idx++;
data[LIS3DH_AXIS_X] = priv->fir.sum[LIS3DH_AXIS_X]/firlen;
data[LIS3DH_AXIS_Y] = priv->fir.sum[LIS3DH_AXIS_Y]/firlen;
data[LIS3DH_AXIS_Z] = priv->fir.sum[LIS3DH_AXIS_Z]/firlen;
if(atomic_read(&priv->trace) & ADX_TRC_FILTER)
{
GSE_LOG("add [%2d] [%5d %5d %5d] => [%5d %5d %5d] : [%5d %5d %5d]\n", idx,
priv->fir.raw[idx][LIS3DH_AXIS_X], priv->fir.raw[idx][LIS3DH_AXIS_Y], priv->fir.raw[idx][LIS3DH_AXIS_Z],
priv->fir.sum[LIS3DH_AXIS_X], priv->fir.sum[LIS3DH_AXIS_Y], priv->fir.sum[LIS3DH_AXIS_Z],
data[LIS3DH_AXIS_X], data[LIS3DH_AXIS_Y], data[LIS3DH_AXIS_Z]);
}
}
}
}
#endif
}
return err;
}
/*----------------------------------------------------------------------------*/
/*
static int LIS3DH_ReadOffset(struct i2c_client *client, s8 ofs[LIS3DH_AXES_NUM])
{
int err;
return err;
}
*/
/*----------------------------------------------------------------------------*/
static int LIS3DH_ResetCalibration(struct i2c_client *client)
{
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
memset(obj->cali_sw, 0x00, sizeof(obj->cali_sw));
return 0;
}
/*----------------------------------------------------------------------------*/
static int LIS3DH_ReadCalibration(struct i2c_client *client, int dat[LIS3DH_AXES_NUM])
{
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
dat[obj->cvt.map[LIS3DH_AXIS_X]] = obj->cvt.sign[LIS3DH_AXIS_X]*obj->cali_sw[LIS3DH_AXIS_X];
dat[obj->cvt.map[LIS3DH_AXIS_Y]] = obj->cvt.sign[LIS3DH_AXIS_Y]*obj->cali_sw[LIS3DH_AXIS_Y];
dat[obj->cvt.map[LIS3DH_AXIS_Z]] = obj->cvt.sign[LIS3DH_AXIS_Z]*obj->cali_sw[LIS3DH_AXIS_Z];
return 0;
}
/*----------------------------------------------------------------------------*/
/*
static int LIS3DH_ReadCalibrationEx(struct i2c_client *client, int act[LIS3DH_AXES_NUM], int raw[LIS3DH_AXES_NUM])
{
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
int err;
int mul;
if(err = LIS3DH_ReadOffset(client, obj->offset))
{
GSE_ERR("read offset fail, %d\n", err);
return err;
}
mul = obj->reso->sensitivity/lis3dh_offset_resolution.sensitivity;
raw[LIS3DH_AXIS_X] = obj->offset[LIS3DH_AXIS_X]*mul + obj->cali_sw[LIS3DH_AXIS_X];
raw[LIS3DH_AXIS_Y] = obj->offset[LIS3DH_AXIS_Y]*mul + obj->cali_sw[LIS3DH_AXIS_Y];
raw[LIS3DH_AXIS_Z] = obj->offset[LIS3DH_AXIS_Z]*mul + obj->cali_sw[LIS3DH_AXIS_Z];
act[obj->cvt.map[LIS3DH_AXIS_X]] = obj->cvt.sign[LIS3DH_AXIS_X]*raw[LIS3DH_AXIS_X];
act[obj->cvt.map[LIS3DH_AXIS_Y]] = obj->cvt.sign[LIS3DH_AXIS_Y]*raw[LIS3DH_AXIS_Y];
act[obj->cvt.map[LIS3DH_AXIS_Z]] = obj->cvt.sign[LIS3DH_AXIS_Z]*raw[LIS3DH_AXIS_Z];
return 0;
}
*/
/*----------------------------------------------------------------------------*/
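/* Note on coordinates: cali_sw[] is kept in chip (sensor) axes while the dat[]
 * argument arrives in the remapped board orientation, so LIS3DH_WriteCalibration()
 * below maps incoming offsets back through cvt.map[]/cvt.sign[] before
 * accumulating them into cali_sw[]. */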
static int LIS3DH_WriteCalibration(struct i2c_client *client, int dat[LIS3DH_AXES_NUM])
{
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
int err = 0;
// int cali[LIS3DH_AXES_NUM];
GSE_FUN();
if(!obj || ! dat)
{
GSE_ERR("null ptr!!\n");
return -EINVAL;
}
else
{
s16 cali[LIS3DH_AXES_NUM];
cali[obj->cvt.map[LIS3DH_AXIS_X]] = obj->cvt.sign[LIS3DH_AXIS_X]*obj->cali_sw[LIS3DH_AXIS_X];
cali[obj->cvt.map[LIS3DH_AXIS_Y]] = obj->cvt.sign[LIS3DH_AXIS_Y]*obj->cali_sw[LIS3DH_AXIS_Y];
cali[obj->cvt.map[LIS3DH_AXIS_Z]] = obj->cvt.sign[LIS3DH_AXIS_Z]*obj->cali_sw[LIS3DH_AXIS_Z];
cali[LIS3DH_AXIS_X] += dat[LIS3DH_AXIS_X];
cali[LIS3DH_AXIS_Y] += dat[LIS3DH_AXIS_Y];
cali[LIS3DH_AXIS_Z] += dat[LIS3DH_AXIS_Z];
obj->cali_sw[LIS3DH_AXIS_X] += obj->cvt.sign[LIS3DH_AXIS_X]*dat[obj->cvt.map[LIS3DH_AXIS_X]];
obj->cali_sw[LIS3DH_AXIS_Y] += obj->cvt.sign[LIS3DH_AXIS_Y]*dat[obj->cvt.map[LIS3DH_AXIS_Y]];
obj->cali_sw[LIS3DH_AXIS_Z] += obj->cvt.sign[LIS3DH_AXIS_Z]*dat[obj->cvt.map[LIS3DH_AXIS_Z]];
}
return err;
}
/*----------------------------------------------------------------------------*/
#if 0
static int LIS3DH_CheckDeviceID(struct i2c_client *client)
{
u8 databuf[10];
int res = 0;
/*
memset(databuf, 0, sizeof(u8)*10);
databuf[0] = LIS3DH_REG_DEVID;
res = i2c_master_send(client, databuf, 0x1);
if(res <= 0)
{
goto exit_LIS3DH_CheckDeviceID;
}
udelay(500);
databuf[0] = 0x0;
res = i2c_master_recv(client, databuf, 0x01);
if(res <= 0)
{
goto exit_LIS3DH_CheckDeviceID;
}
if(databuf[0]!=LIS3DH_FIXED_DEVID)
{
return LIS3DH_ERR_IDENTIFICATION;
}
exit_LIS3DH_CheckDeviceID:
if (res <= 0)
{
return LIS3DH_ERR_I2C;
}
*/
return LIS3DH_SUCCESS;
}
#endif
/*----------------------------------------------------------------------------*/
static int LIS3DH_SetPowerMode(struct i2c_client *client, bool enable)
{
u8 databuf[2];
int res = 0;
u8 addr = LIS3DH_REG_CTL_REG1;
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
if(enable == sensor_power)
{
GSE_LOG("Sensor power status is newest!\n");
return LIS3DH_SUCCESS;
}
if(hwmsen_read_byte(client, addr, &databuf[0]))
{
GSE_ERR("read power ctl register err!\n");
return LIS3DH_ERR_I2C;
}
databuf[0] &= ~LIS3DH_MEASURE_MODE;
if(enable == TRUE)
{
databuf[0] &= ~LIS3DH_MEASURE_MODE;
}
else
{
databuf[0] |= LIS3DH_MEASURE_MODE;
}
databuf[1] = databuf[0];
databuf[0] = LIS3DH_REG_CTL_REG1;
res = i2c_master_send(client, databuf, 0x2);
if(res <= 0)
{
GSE_LOG("set power mode failed!\n");
return LIS3DH_ERR_I2C;
}
else if(atomic_read(&obj->trace) & ADX_TRC_INFO)
{
GSE_LOG("set power mode ok %d!\n", databuf[1]);
}
sensor_power = enable;
return LIS3DH_SUCCESS;
}
/*----------------------------------------------------------------------------*/
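/* The register writes below follow one pattern: read-modify-write the control
 * register, then i2c_master_send() a 2-byte buffer of {register address, new
 * value}. For CTRL_REG4 the full-scale bits live under mask 0x30, and the
 * cached resolution entry is refreshed afterwards via LIS3DH_SetDataResolution(). */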
static int LIS3DH_SetDataFormat(struct i2c_client *client, u8 dataformat)
{
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
u8 databuf[10];
u8 addr = LIS3DH_REG_CTL_REG4;
int res = 0;
memset(databuf, 0, sizeof(u8)*10);
if(hwmsen_read_byte(client, addr, &databuf[0]))
{
GSE_ERR("read reg_ctl_reg1 register err!\n");
return LIS3DH_ERR_I2C;
}
databuf[0] &= ~0x30;
databuf[0] |=dataformat;
databuf[1] = databuf[0];
databuf[0] = LIS3DH_REG_CTL_REG4;
res = i2c_master_send(client, databuf, 0x2);
if(res <= 0)
{
return LIS3DH_ERR_I2C;
}
return LIS3DH_SetDataResolution(obj);
}
/*----------------------------------------------------------------------------*/
static int LIS3DH_SetBWRate(struct i2c_client *client, u8 bwrate)
{
u8 databuf[10];
u8 addr = LIS3DH_REG_CTL_REG1;
int res = 0;
memset(databuf, 0, sizeof(u8)*10);
if(hwmsen_read_byte(client, addr, &databuf[0]))
{
GSE_ERR("read reg_ctl_reg1 register err!\n");
return LIS3DH_ERR_I2C;
}
databuf[0] &= ~0xF0;
databuf[0] |= bwrate;
databuf[1] = databuf[0];
databuf[0] = LIS3DH_REG_CTL_REG1;
res = i2c_master_send(client, databuf, 0x2);
if(res <= 0)
{
return LIS3DH_ERR_I2C;
}
return LIS3DH_SUCCESS;
}
/*----------------------------------------------------------------------------*/
//configure the data-ready interrupt; the current implementation always writes 0x00 to CTRL_REG3 (all interrupts disabled) regardless of intenable
static int LIS3DH_SetIntEnable(struct i2c_client *client, u8 intenable)
{
u8 databuf[10];
u8 addr = LIS3DH_REG_CTL_REG3;
int res = 0;
memset(databuf, 0, sizeof(u8)*10);
if(hwmsen_read_byte(client, addr, &databuf[0]))
{
GSE_ERR("read reg_ctl_reg1 register err!\n");
return LIS3DH_ERR_I2C;
}
databuf[0] = 0x00;
databuf[1] = databuf[0];
databuf[0] = LIS3DH_REG_CTL_REG3;
res = i2c_master_send(client, databuf, 0x2);
if(res <= 0)
{
return LIS3DH_ERR_I2C;
}
return LIS3DH_SUCCESS;
}
/*----------------------------------------------------------------------------*/
static int LIS3DH_Init(struct i2c_client *client, int reset_cali)
{
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
int res = 0;
/*
res = LIS3DH_CheckDeviceID(client);
if(res != LIS3DH_SUCCESS)
{
return res;
}
*/
// first clear reg1
res = hwmsen_write_byte(client,LIS3DH_REG_CTL_REG1,0x07);
if(res != LIS3DH_SUCCESS)
{
return res;
}
res = LIS3DH_SetPowerMode(client, false);
if(res != LIS3DH_SUCCESS)
{
return res;
}
res = LIS3DH_SetBWRate(client, LIS3DH_BW_100HZ);//400 or 100 no other choice
if(res != LIS3DH_SUCCESS )
{
return res;
}
res = LIS3DH_SetDataFormat(client, LIS3DH_RANGE_2G);//2g or 8g, no other choice
if(res != LIS3DH_SUCCESS)
{
return res;
}
gsensor_gain.x = gsensor_gain.y = gsensor_gain.z = obj->reso->sensitivity;
res = LIS3DH_SetIntEnable(client, false);
if(res != LIS3DH_SUCCESS)
{
return res;
}
if(0 != reset_cali)
{
//reset calibration only in power on
res = LIS3DH_ResetCalibration(client);
if(res != LIS3DH_SUCCESS)
{
return res;
}
}
#ifdef CONFIG_LIS3DH_LOWPASS
memset(&obj->fir, 0x00, sizeof(obj->fir));
#endif
return LIS3DH_SUCCESS;
}
/*----------------------------------------------------------------------------*/
static int LIS3DH_ReadChipInfo(struct i2c_client *client, char *buf, int bufsize)
{
u8 databuf[10];
memset(databuf, 0, sizeof(u8)*10);
if((NULL == buf)||(bufsize<=30))
{
return -1;
}
if(NULL == client)
{
*buf = 0;
return -2;
}
sprintf(buf, "LIS3DH Chip");
return 0;
}
/*----------------------------------------------------------------------------*/
static int LIS3DH_ReadSensorData(struct i2c_client *client, char *buf, int bufsize)
{
struct lis3dh_i2c_data *obj = (struct lis3dh_i2c_data*)i2c_get_clientdata(client);
u8 databuf[20];
int acc[LIS3DH_AXES_NUM];
int res = 0;
memset(databuf, 0, sizeof(u8)*10);
if(NULL == buf)
{
return -1;
}
if(NULL == client)
{
*buf = 0;
return -2;
}
if(sensor_power == FALSE)
{
res = LIS3DH_SetPowerMode(client, true);
if(res)
{
GSE_ERR("Power on lis3dh error %d!\n", res);
}
msleep(20);
}
if((res = LIS3DH_ReadData(client, obj->data)))
{
GSE_ERR("I2C error: ret value=%d", res);
return -3;
}
else
{
obj->data[LIS3DH_AXIS_X] += obj->cali_sw[LIS3DH_AXIS_X];
obj->data[LIS3DH_AXIS_Y] += obj->cali_sw[LIS3DH_AXIS_Y];
obj->data[LIS3DH_AXIS_Z] += obj->cali_sw[LIS3DH_AXIS_Z];
/*remap coordinate*/
acc[obj->cvt.map[LIS3DH_AXIS_X]] = obj->cvt.sign[LIS3DH_AXIS_X]*obj->data[LIS3DH_AXIS_X];
acc[obj->cvt.map[LIS3DH_AXIS_Y]] = obj->cvt.sign[LIS3DH_AXIS_Y]*obj->data[LIS3DH_AXIS_Y];
acc[obj->cvt.map[LIS3DH_AXIS_Z]] = obj->cvt.sign[LIS3DH_AXIS_Z]*obj->data[LIS3DH_AXIS_Z];
//GSE_LOG("Mapped gsensor data: %d, %d, %d!\n", acc[LIS3DH_AXIS_X], acc[LIS3DH_AXIS_Y], acc[LIS3DH_AXIS_Z]);
//Out put the mg
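/* Scale raw counts to the framework's output units:
 * acc = counts * GRAVITY_EARTH_1000 / sensitivity, so a full 1 g reading
 * (e.g. 1024 counts at +/-2g, sensitivity 1024 LSB/g) comes out as
 * GRAVITY_EARTH_1000 (9807 if the usual 9.80665 m/s^2 x 1000 platform
 * definition applies; an assumption, the constant lives in platform headers). */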
acc[LIS3DH_AXIS_X] = acc[LIS3DH_AXIS_X] * GRAVITY_EARTH_1000 / obj->reso->sensitivity;
acc[LIS3DH_AXIS_Y] = acc[LIS3DH_AXIS_Y] * GRAVITY_EARTH_1000 / obj->reso->sensitivity;
acc[LIS3DH_AXIS_Z] = acc[LIS3DH_AXIS_Z] * GRAVITY_EARTH_1000 / obj->reso->sensitivity;
sprintf(buf, "%04x %04x %04x", acc[LIS3DH_AXIS_X], acc[LIS3DH_AXIS_Y], acc[LIS3DH_AXIS_Z]);
if(atomic_read(&obj->trace) & ADX_TRC_IOCTL)//atomic_read(&obj->trace) & ADX_TRC_IOCTL
{
GSE_LOG("gsensor data: %s!\n", buf);
dumpReg(client);
}
}
return 0;
}
/*----------------------------------------------------------------------------*/
static int LIS3DH_ReadRawData(struct i2c_client *client, char *buf)
{
struct lis3dh_i2c_data *obj = (struct lis3dh_i2c_data*)i2c_get_clientdata(client);
int res = 0;
if (!buf || !client)
{
return EINVAL;
}
if((res = LIS3DH_ReadData(client, obj->data)))
{
GSE_ERR("I2C error: ret value=%d", res);
return EIO;
}
else
{
sprintf(buf, "%04x %04x %04x", obj->data[LIS3DH_AXIS_X],
obj->data[LIS3DH_AXIS_Y], obj->data[LIS3DH_AXIS_Z]);
}
return 0;
}
/*----------------------------------------------------------------------------*/
static ssize_t show_chipinfo_value(struct device_driver *ddri, char *buf)
{
struct i2c_client *client = lis3dh_i2c_client;
char strbuf[LIS3DH_BUFSIZE];
if(NULL == client)
{
GSE_ERR("i2c client is null!!\n");
return 0;
}
LIS3DH_ReadChipInfo(client, strbuf, LIS3DH_BUFSIZE);
return snprintf(buf, PAGE_SIZE, "%s\n", strbuf);
}
/*----------------------------------------------------------------------------*/
static ssize_t show_sensordata_value(struct device_driver *ddri, char *buf)
{
struct i2c_client *client = lis3dh_i2c_client;
char strbuf[LIS3DH_BUFSIZE];
if(NULL == client)
{
GSE_ERR("i2c client is null!!\n");
return 0;
}
LIS3DH_ReadSensorData(client, strbuf, LIS3DH_BUFSIZE);
return snprintf(buf, PAGE_SIZE, "%s\n", strbuf);
}
/*----------------------------------------------------------------------------*/
static ssize_t show_cali_value(struct device_driver *ddri, char *buf)
{
struct i2c_client *client = lis3dh_i2c_client;
struct lis3dh_i2c_data *obj;
int err, len = 0, mul;
int tmp[LIS3DH_AXES_NUM];
if(NULL == client)
{
GSE_ERR("i2c client is null!!\n");
return 0;
}
obj = i2c_get_clientdata(client);
if((err = LIS3DH_ReadCalibration(client, tmp)))
{
return -EINVAL;
}
else
{
mul = obj->reso->sensitivity/lis3dh_offset_resolution.sensitivity;
len += snprintf(buf+len, PAGE_SIZE-len, "[HW ][%d] (%+3d, %+3d, %+3d) : (0x%02X, 0x%02X, 0x%02X)\n", mul,
obj->offset[LIS3DH_AXIS_X], obj->offset[LIS3DH_AXIS_Y], obj->offset[LIS3DH_AXIS_Z],
obj->offset[LIS3DH_AXIS_X], obj->offset[LIS3DH_AXIS_Y], obj->offset[LIS3DH_AXIS_Z]);
len += snprintf(buf+len, PAGE_SIZE-len, "[SW ][%d] (%+3d, %+3d, %+3d)\n", 1,
obj->cali_sw[LIS3DH_AXIS_X], obj->cali_sw[LIS3DH_AXIS_Y], obj->cali_sw[LIS3DH_AXIS_Z]);
len += snprintf(buf+len, PAGE_SIZE-len, "[ALL] (%+3d, %+3d, %+3d) : (%+3d, %+3d, %+3d)\n",
obj->offset[LIS3DH_AXIS_X]*mul + obj->cali_sw[LIS3DH_AXIS_X],
obj->offset[LIS3DH_AXIS_Y]*mul + obj->cali_sw[LIS3DH_AXIS_Y],
obj->offset[LIS3DH_AXIS_Z]*mul + obj->cali_sw[LIS3DH_AXIS_Z],
tmp[LIS3DH_AXIS_X], tmp[LIS3DH_AXIS_Y], tmp[LIS3DH_AXIS_Z]);
return len;
}
}
/*----------------------------------------------------------------------------*/
static ssize_t store_cali_value(struct device_driver *ddri, const char *buf, size_t count)
{
struct i2c_client *client = lis3dh_i2c_client;
int err, x, y, z;
int dat[LIS3DH_AXES_NUM];
if(!strncmp(buf, "rst", 3))
{
if((err = LIS3DH_ResetCalibration(client)))
{
GSE_ERR("reset offset err = %d\n", err);
}
}
else if(3 == sscanf(buf, "0x%02X 0x%02X 0x%02X", &x, &y, &z))
{
dat[LIS3DH_AXIS_X] = x;
dat[LIS3DH_AXIS_Y] = y;
dat[LIS3DH_AXIS_Z] = z;
if((err = LIS3DH_WriteCalibration(client, dat)))
{
GSE_ERR("write calibration err = %d\n", err);
}
}
else
{
GSE_ERR("invalid format\n");
}
return count;
}
/*----------------------------------------------------------------------------*/
static ssize_t show_power_status(struct device_driver *ddri, char *buf)
{
struct i2c_client *client = lis3dh_i2c_client;
struct lis3dh_i2c_data *obj;
u8 data;
if(NULL == client)
{
GSE_ERR("i2c client is null!!\n");
return 0;
}
obj = i2c_get_clientdata(client);
hwmsen_read_byte(client,LIS3DH_REG_CTL_REG1,&data);
data &= 0x08;
data = data>>3;
return snprintf(buf, PAGE_SIZE, "%x\n", data);
}
/*----------------------------------------------------------------------------*/
static ssize_t show_firlen_value(struct device_driver *ddri, char *buf)
{
#ifdef CONFIG_LIS3DH_LOWPASS
struct i2c_client *client = lis3dh_i2c_client;
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
if(atomic_read(&obj->firlen))
{
int idx, len = atomic_read(&obj->firlen);
GSE_LOG("len = %2d, idx = %2d\n", obj->fir.num, obj->fir.idx);
for(idx = 0; idx < len; idx++)
{
GSE_LOG("[%5d %5d %5d]\n", obj->fir.raw[idx][LIS3DH_AXIS_X], obj->fir.raw[idx][LIS3DH_AXIS_Y], obj->fir.raw[idx][LIS3DH_AXIS_Z]);
}
GSE_LOG("sum = [%5d %5d %5d]\n", obj->fir.sum[LIS3DH_AXIS_X], obj->fir.sum[LIS3DH_AXIS_Y], obj->fir.sum[LIS3DH_AXIS_Z]);
GSE_LOG("avg = [%5d %5d %5d]\n", obj->fir.sum[LIS3DH_AXIS_X]/len, obj->fir.sum[LIS3DH_AXIS_Y]/len, obj->fir.sum[LIS3DH_AXIS_Z]/len);
}
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&obj->firlen));
#else
return snprintf(buf, PAGE_SIZE, "not support\n");
#endif
}
/*----------------------------------------------------------------------------*/
static ssize_t store_firlen_value(struct device_driver *ddri, const char *buf, size_t count)
{
#ifdef CONFIG_LIS3DH_LOWPASS
struct i2c_client *client = lis3dh_i2c_client;
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
int firlen;
if(1 != sscanf(buf, "%d", &firlen))
{
GSE_ERR("invallid format\n");
}
else if(firlen > C_MAX_FIR_LENGTH)
{
GSE_ERR("exceeds maximum filter length\n");
}
else
{
atomic_set(&obj->firlen, firlen);
if(0 == firlen)//yucong fix build warning
{
atomic_set(&obj->fir_en, 0);
}
else
{
memset(&obj->fir, 0x00, sizeof(obj->fir));
atomic_set(&obj->fir_en, 1);
}
}
#endif
return count;
}
/*----------------------------------------------------------------------------*/
static ssize_t show_trace_value(struct device_driver *ddri, char *buf)
{
ssize_t res;
struct lis3dh_i2c_data *obj = obj_i2c_data;
if (obj == NULL)
{
GSE_ERR("i2c_data obj is null!!\n");
return 0;
}
res = snprintf(buf, PAGE_SIZE, "0x%04X\n", atomic_read(&obj->trace));
return res;
}
/*----------------------------------------------------------------------------*/
static ssize_t store_trace_value(struct device_driver *ddri, const char *buf, size_t count)
{
struct lis3dh_i2c_data *obj = obj_i2c_data;
int trace;
if (obj == NULL)
{
GSE_ERR("i2c_data obj is null!!\n");
return 0;
}
if(1 == sscanf(buf, "0x%x", &trace))
{
atomic_set(&obj->trace, trace);
}
else
{
GSE_ERR("invalid content: '%s', length = %d\n", buf, count);
}
return count;
}
/*----------------------------------------------------------------------------*/
static ssize_t show_status_value(struct device_driver *ddri, char *buf)
{
ssize_t len = 0;
struct lis3dh_i2c_data *obj = obj_i2c_data;
if (obj == NULL)
{
GSE_ERR("i2c_data obj is null!!\n");
return 0;
}
if(obj->hw)
{
len += snprintf(buf+len, PAGE_SIZE-len, "CUST: %d %d (%d %d)\n",
obj->hw->i2c_num, obj->hw->direction, obj->hw->power_id, obj->hw->power_vol);
}
else
{
len += snprintf(buf+len, PAGE_SIZE-len, "CUST: NULL\n");
}
return len;
}
/*----------------------------------------------------------------------------*/
static DRIVER_ATTR(chipinfo, S_IRUGO, show_chipinfo_value, NULL);
static DRIVER_ATTR(sensordata, S_IRUGO, show_sensordata_value, NULL);
static DRIVER_ATTR(cali, S_IWUSR | S_IRUGO, show_cali_value, store_cali_value);
static DRIVER_ATTR(power, S_IRUGO, show_power_status, NULL);
static DRIVER_ATTR(firlen, S_IWUSR | S_IRUGO, show_firlen_value, store_firlen_value);
static DRIVER_ATTR(trace, S_IWUSR | S_IRUGO, show_trace_value, store_trace_value);
static DRIVER_ATTR(status, S_IRUGO, show_status_value, NULL);
/*----------------------------------------------------------------------------*/
static struct driver_attribute *lis3dh_attr_list[] = {
&driver_attr_chipinfo, /*chip information*/
&driver_attr_sensordata, /*dump sensor data*/
&driver_attr_cali, /*show calibration data*/
&driver_attr_power, /*show power reg*/
&driver_attr_firlen, /*filter length: 0: disable, others: enable*/
&driver_attr_trace, /*trace log*/
&driver_attr_status,
};
/*----------------------------------------------------------------------------*/
static int lis3dh_create_attr(struct device_driver *driver)
{
int idx, err = 0;
int num = (int)(sizeof(lis3dh_attr_list)/sizeof(lis3dh_attr_list[0]));
if (driver == NULL)
{
return -EINVAL;
}
for(idx = 0; idx < num; idx++)
{
if((err = driver_create_file(driver, lis3dh_attr_list[idx])))
{
GSE_ERR("driver_create_file (%s) = %d\n", lis3dh_attr_list[idx]->attr.name, err);
break;
}
}
return err;
}
/*----------------------------------------------------------------------------*/
static int lis3dh_delete_attr(struct device_driver *driver)
{
int idx ,err = 0;
int num = (int)(sizeof(lis3dh_attr_list)/sizeof(lis3dh_attr_list[0]));
if(driver == NULL)
{
return -EINVAL;
}
for(idx = 0; idx < num; idx++)
{
driver_remove_file(driver, lis3dh_attr_list[idx]);
}
return err;
}
/*----------------------------------------------------------------------------*/
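/* hwmsen callback: the sensor framework invokes this with SENSOR_DELAY,
 * SENSOR_ENABLE and SENSOR_GET_DATA commands; buff_in/buff_out carry the
 * command payloads and the return value is 0 on success or a negative error. */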
int lis3dh_operate(void* self, uint32_t command, void* buff_in, int size_in,
void* buff_out, int size_out, int* actualout)
{
int err = 0;
int value, sample_delay;
struct lis3dh_i2c_data *priv = (struct lis3dh_i2c_data*)self;
hwm_sensor_data* gsensor_data;
char buff[LIS3DH_BUFSIZE];
//GSE_FUN(f);
switch (command)
{
case SENSOR_DELAY:
if((buff_in == NULL) || (size_in < sizeof(int)))
{
GSE_ERR("Set delay parameter error!\n");
err = -EINVAL;
}
else
{
value = *(int *)buff_in;
if(value <= 5)
{
sample_delay = LIS3DH_BW_200HZ;
}
else if(value <= 10)
{
sample_delay = LIS3DH_BW_100HZ;
}
else
{
sample_delay = LIS3DH_BW_50HZ;
}
err = LIS3DH_SetBWRate(priv->client, sample_delay);
if(err != LIS3DH_SUCCESS ) //0x2C->BW=100Hz
{
GSE_ERR("Set delay parameter error!\n");
}
if(value >= 50)
{
atomic_set(&priv->filter, 0);
}
else
{
priv->fir.num = 0;
priv->fir.idx = 0;
priv->fir.sum[LIS3DH_AXIS_X] = 0;
priv->fir.sum[LIS3DH_AXIS_Y] = 0;
priv->fir.sum[LIS3DH_AXIS_Z] = 0;
atomic_set(&priv->filter, 1);
}
}
break;
case SENSOR_ENABLE:
if((buff_in == NULL) || (size_in < sizeof(int)))
{
GSE_ERR("Enable sensor parameter error!\n");
err = -EINVAL;
}
else
{
value = *(int *)buff_in;
GSE_LOG("enable value=%d, sensor_power =%d\n",value,sensor_power);
if(((value == 0) && (sensor_power == false)) ||((value == 1) && (sensor_power == true)))
{
GSE_LOG("Gsensor device have updated!\n");
}
else
{
err = LIS3DH_SetPowerMode( priv->client, !sensor_power);
}
}
break;
case SENSOR_GET_DATA:
if((buff_out == NULL) || (size_out< sizeof(hwm_sensor_data)))
{
GSE_ERR("get sensor data parameter error!\n");
err = -EINVAL;
}
else
{
gsensor_data = (hwm_sensor_data *)buff_out;
LIS3DH_ReadSensorData(priv->client, buff, LIS3DH_BUFSIZE);
sscanf(buff, "%x %x %x", &gsensor_data->values[0],
&gsensor_data->values[1], &gsensor_data->values[2]);
gsensor_data->status = SENSOR_STATUS_ACCURACY_MEDIUM;
gsensor_data->value_divide = 1000;
}
break;
default:
GSE_ERR("gsensor operate function no this parameter %d!\n", command);
err = -1;
break;
}
return err;
}
/******************************************************************************
* Function Configuration
******************************************************************************/
static int lis3dh_open(struct inode *inode, struct file *file)
{
file->private_data = lis3dh_i2c_client;
if(file->private_data == NULL)
{
GSE_ERR("null pointer!!\n");
return -EINVAL;
}
return nonseekable_open(inode, file);
}
/*----------------------------------------------------------------------------*/
static int lis3dh_release(struct inode *inode, struct file *file)
{
file->private_data = NULL;
return 0;
}
/*----------------------------------------------------------------------------*/
//static int lis3dh_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
// unsigned long arg)
static long lis3dh_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct i2c_client *client = (struct i2c_client*)file->private_data;
struct lis3dh_i2c_data *obj = (struct lis3dh_i2c_data*)i2c_get_clientdata(client);
char strbuf[LIS3DH_BUFSIZE];
void __user *data;
SENSOR_DATA sensor_data;
long err = 0;
int cali[3];
//GSE_FUN(f);
if(_IOC_DIR(cmd) & _IOC_READ)
{
err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
}
else if(_IOC_DIR(cmd) & _IOC_WRITE)
{
err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
}
if(err)
{
GSE_ERR("access error: %08X, (%2d, %2d)\n", cmd, _IOC_DIR(cmd), _IOC_SIZE(cmd));
return -EFAULT;
}
switch(cmd)
{
case GSENSOR_IOCTL_INIT:
LIS3DH_Init(client, 0);
break;
case GSENSOR_IOCTL_READ_CHIPINFO:
data = (void __user *) arg;
if(data == NULL)
{
err = -EINVAL;
break;
}
LIS3DH_ReadChipInfo(client, strbuf, LIS3DH_BUFSIZE);
if(copy_to_user(data, strbuf, strlen(strbuf)+1))
{
err = -EFAULT;
break;
}
break;
case GSENSOR_IOCTL_READ_SENSORDATA:
data = (void __user *) arg;
if(data == NULL)
{
err = -EINVAL;
break;
}
LIS3DH_ReadSensorData(client, strbuf, LIS3DH_BUFSIZE);
if(copy_to_user(data, strbuf, strlen(strbuf)+1))
{
err = -EFAULT;
break;
}
break;
case GSENSOR_IOCTL_READ_GAIN:
data = (void __user *) arg;
if(data == NULL)
{
err = -EINVAL;
break;
}
if(copy_to_user(data, &gsensor_gain, sizeof(GSENSOR_VECTOR3D)))
{
err = -EFAULT;
break;
}
break;
case GSENSOR_IOCTL_READ_OFFSET:
data = (void __user *) arg;
if(data == NULL)
{
err = -EINVAL;
break;
}
if(copy_to_user(data, &gsensor_offset, sizeof(GSENSOR_VECTOR3D)))
{
err = -EFAULT;
break;
}
break;
case GSENSOR_IOCTL_READ_RAW_DATA:
data = (void __user *) arg;
if(data == NULL)
{
err = -EINVAL;
break;
}
LIS3DH_ReadRawData(client, strbuf);
if(copy_to_user(data, &strbuf, strlen(strbuf)+1))
{
err = -EFAULT;
break;
}
break;
case GSENSOR_IOCTL_SET_CALI:
data = (void __user*)arg;
if(data == NULL)
{
err = -EINVAL;
break;
}
if(copy_from_user(&sensor_data, data, sizeof(sensor_data)))
{
err = -EFAULT;
break;
}
if(atomic_read(&obj->suspend))
{
GSE_ERR("Perform calibration in suspend state!!\n");
err = -EINVAL;
}
else
{
cali[LIS3DH_AXIS_X] = sensor_data.x * obj->reso->sensitivity / GRAVITY_EARTH_1000;
cali[LIS3DH_AXIS_Y] = sensor_data.y * obj->reso->sensitivity / GRAVITY_EARTH_1000;
cali[LIS3DH_AXIS_Z] = sensor_data.z * obj->reso->sensitivity / GRAVITY_EARTH_1000;
err = LIS3DH_WriteCalibration(client, cali);
}
break;
case GSENSOR_IOCTL_CLR_CALI:
err = LIS3DH_ResetCalibration(client);
break;
case GSENSOR_IOCTL_GET_CALI:
data = (void __user*)arg;
if(data == NULL)
{
err = -EINVAL;
break;
}
if((err = LIS3DH_ReadCalibration(client, cali)))
{
break;
}
sensor_data.x = cali[LIS3DH_AXIS_X] * GRAVITY_EARTH_1000 / obj->reso->sensitivity;
sensor_data.y = cali[LIS3DH_AXIS_Y] * GRAVITY_EARTH_1000 / obj->reso->sensitivity;
sensor_data.z = cali[LIS3DH_AXIS_Z] * GRAVITY_EARTH_1000 / obj->reso->sensitivity;
if(copy_to_user(data, &sensor_data, sizeof(sensor_data)))
{
err = -EFAULT;
break;
}
break;
default:
GSE_ERR("unknown IOCTL: 0x%08x\n", cmd);
err = -ENOIOCTLCMD;
break;
}
return err;
}
/*----------------------------------------------------------------------------*/
static struct file_operations lis3dh_fops = {
.owner = THIS_MODULE,
.open = lis3dh_open,
.release = lis3dh_release,
//.ioctl = lis3dh_ioctl,
.unlocked_ioctl = lis3dh_unlocked_ioctl,
};
/*----------------------------------------------------------------------------*/
static struct miscdevice lis3dh_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "gsensor",
.fops = &lis3dh_fops,
};
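/* Userspace reaches the driver through the "gsensor" misc device registered
 * here (typically /dev/gsensor); the GSENSOR_IOCTL_* commands handled in
 * lis3dh_unlocked_ioctl() map onto the read/calibration helpers above. */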
/*----------------------------------------------------------------------------*/
#ifndef CONFIG_HAS_EARLYSUSPEND
/*----------------------------------------------------------------------------*/
static int lis3dh_suspend(struct i2c_client *client, pm_message_t msg)
{
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
int err = 0;
u8 dat;
GSE_FUN();
if(msg.event == PM_EVENT_SUSPEND)
{
if(obj == NULL)
{
GSE_ERR("null pointer!!\n");
return -EINVAL;
}
//read old data
if ((err = hwmsen_read_byte(client, LIS3DH_REG_CTL_REG1, &dat)))
{
GSE_ERR("write data format fail!!\n");
return err;
}
dat = dat&0b10111111;
atomic_set(&obj->suspend, 1);
if((err = hwmsen_write_byte(client, LIS3DH_REG_CTL_REG1, dat)))
{
GSE_ERR("write power control fail!!\n");
return err;
}
LIS3DH_power(obj->hw, 0);
}
return err;
}
/*----------------------------------------------------------------------------*/
static int lis3dh_resume(struct i2c_client *client)
{
struct lis3dh_i2c_data *obj = i2c_get_clientdata(client);
//int err;
GSE_FUN();
if(obj == NULL)
{
GSE_ERR("null pointer!!\n");
return -EINVAL;
}
LIS3DH_power(obj->hw, 1);
#if 0
mdelay(30);//yucong add for fix g sensor resume issue
if(err = LIS3DH_Init(client, 0))
{
GSE_ERR("initialize client fail!!\n");
return err;
}
#endif
atomic_set(&obj->suspend, 0);
return 0;
}
/*----------------------------------------------------------------------------*/
#else /*CONFIG_HAS_EARLY_SUSPEND is defined*/
/*----------------------------------------------------------------------------*/
static void lis3dh_early_suspend(struct early_suspend *h)
{
struct lis3dh_i2c_data *obj = container_of(h, struct lis3dh_i2c_data, early_drv);
int err;
GSE_FUN();
if(obj == NULL)
{
GSE_ERR("null pointer!!\n");
return;
}
atomic_set(&obj->suspend, 1);
/*
if(err = hwmsen_write_byte(obj->client, LIS3DH_REG_POWER_CTL, 0x00))
{
GSE_ERR("write power control fail!!\n");
return;
}
*/
if((err = LIS3DH_SetPowerMode(obj->client, false)))
{
GSE_ERR("write power control fail!!\n");
return;
}
sensor_power = false;
LIS3DH_power(obj->hw, 0);
}
/*----------------------------------------------------------------------------*/
static void lis3dh_late_resume(struct early_suspend *h)
{
struct lis3dh_i2c_data *obj = container_of(h, struct lis3dh_i2c_data, early_drv);
//int err;
GSE_FUN();
if(obj == NULL)
{
GSE_ERR("null pointer!!\n");
return;
}
LIS3DH_power(obj->hw, 1);
#if 0
mdelay(30);//yucong add for fix g sensor resume issue
if((err = LIS3DH_Init(obj->client, 0)))
{
GSE_ERR("initialize client fail!!\n");
return;
}
#endif
atomic_set(&obj->suspend, 0);
}
/*----------------------------------------------------------------------------*/
#endif /*CONFIG_HAS_EARLYSUSPEND*/
/*----------------------------------------------------------------------------*/
/*
static int lis3dh_i2c_detect(struct i2c_client *client, int kind, struct i2c_board_info *info)
{
strcpy(info->type, LIS3DH_DEV_NAME);
return 0;
}
*/
/*----------------------------------------------------------------------------*/
static int lis3dh_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct i2c_client *new_client;
struct lis3dh_i2c_data *obj;
struct hwmsen_object sobj;
int err = 0;
GSE_FUN();
if(!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
{
err = -ENOMEM;
goto exit;
}
memset(obj, 0, sizeof(struct lis3dh_i2c_data));
obj->hw = lis3dh_get_cust_acc_hw();
if((err = hwmsen_get_convert(obj->hw->direction, &obj->cvt)))
{
GSE_ERR("invalid direction: %d\n", obj->hw->direction);
goto exit;
}
obj_i2c_data = obj;
obj->client = client;
new_client = obj->client;
i2c_set_clientdata(new_client,obj);
atomic_set(&obj->trace, 0);
atomic_set(&obj->suspend, 0);
#ifdef CONFIG_LIS3DH_LOWPASS
if(obj->hw->firlen > C_MAX_FIR_LENGTH)
{
atomic_set(&obj->firlen, C_MAX_FIR_LENGTH);
}
else
{
atomic_set(&obj->firlen, obj->hw->firlen);
}
if(atomic_read(&obj->firlen) > 0)
{
atomic_set(&obj->fir_en, 1);
}
#endif
lis3dh_i2c_client = new_client;
if((err = LIS3DH_Init(new_client, 1)))
{
goto exit_init_failed;
}
if((err = misc_register(&lis3dh_device)))
{
GSE_ERR("lis3dh_device register failed\n");
goto exit_misc_device_register_failed;
}
if((err = lis3dh_create_attr(&(lis3dh_init_info.platform_diver_addr->driver))))
{
GSE_ERR("create attribute err = %d\n", err);
goto exit_create_attr_failed;
}
sobj.self = obj;
sobj.polling = 1;
sobj.sensor_operate = lis3dh_operate;
if((err = hwmsen_attach(ID_ACCELEROMETER, &sobj)))
{
GSE_ERR("attach fail = %d\n", err);
goto exit_kfree;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
obj->early_drv.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1,
obj->early_drv.suspend = lis3dh_early_suspend,
obj->early_drv.resume = lis3dh_late_resume,
register_early_suspend(&obj->early_drv);
#endif
GSE_LOG("%s: OK\n", __func__);
lis3dh_init_flag = 0;
return 0;
exit_create_attr_failed:
misc_deregister(&lis3dh_device);
exit_misc_device_register_failed:
exit_init_failed:
//i2c_detach_client(new_client);
exit_kfree:
kfree(obj);
exit:
GSE_ERR("%s: err = %d\n", __func__, err);
lis3dh_init_flag = -1;
return err;
}
/*----------------------------------------------------------------------------*/
static int lis3dh_i2c_remove(struct i2c_client *client)
{
int err = 0;
if((err = lis3dh_delete_attr(&(lis3dh_init_info.platform_diver_addr->driver))))
{
GSE_ERR("lis3dh_delete_attr fail: %d\n", err);
}
if((err = misc_deregister(&lis3dh_device)))
{
GSE_ERR("misc_deregister fail: %d\n", err);
}
if((err = hwmsen_detach(ID_ACCELEROMETER)))
{
GSE_ERR("hwmsen_detach fail: %d\n", err);
}
lis3dh_i2c_client = NULL;
i2c_unregister_device(client);
kfree(i2c_get_clientdata(client));
return 0;
}
/*----------------------------------------------------------------------------*/
#if 0
/*----------------------------------------------------------------------------*/
static int lis3dh_probe(struct platform_device *pdev)
{
struct acc_hw *hw = get_cust_acc_hw();
GSE_FUN();
LIS3DH_power(hw, 1);
//lis3dh_force[0] = hw->i2c_num;
if(i2c_add_driver(&lis3dh_i2c_driver))
{
GSE_ERR("add driver error\n");
return -1;
}
return 0;
}
/*----------------------------------------------------------------------------*/
static int lis3dh_remove(struct platform_device *pdev)
{
struct acc_hw *hw = get_cust_acc_hw();
GSE_FUN();
LIS3DH_power(hw, 0);
i2c_del_driver(&lis3dh_i2c_driver);
return 0;
}
/*----------------------------------------------------------------------------*/
static struct platform_driver lis3dh_gsensor_driver = {
.probe = lis3dh_probe,
.remove = lis3dh_remove,
.driver = {
.name = "gsensor",
.owner = THIS_MODULE,
}
};
/*----------------------------------------------------------------------------*/
#endif
/*----------------------------------------------------------------------------*/
static int lis3dh_remove(void)
{
struct acc_hw *hw = lis3dh_get_cust_acc_hw();
GSE_FUN();
LIS3DH_power(hw, 0);
i2c_del_driver(&lis3dh_i2c_driver);
return 0;
}
/*----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
static int lis3dh_local_init(void)
{
struct acc_hw *hw = lis3dh_get_cust_acc_hw();
GSE_FUN();
LIS3DH_power(hw, 1);
if(i2c_add_driver(&lis3dh_i2c_driver))
{
GSE_ERR("add driver error\n");
return -1;
}
if(-1 == lis3dh_init_flag)
{
return -1;
}
return 0;
}
/*----------------------------------------------------------------------------*/
static int __init lis3dh_init(void)
{
GSE_FUN();
i2c_register_board_info(0, &i2c_LIS3DH, 1);
hwmsen_gsensor_add(&lis3dh_init_info);
#if 0
if(platform_driver_register(&lis3dh_gsensor_driver))
{
GSE_ERR("failed to register driver");
return -ENODEV;
}
#endif
return 0;
}
/*----------------------------------------------------------------------------*/
static void __exit lis3dh_exit(void)
{
GSE_FUN();
//platform_driver_unregister(&lis3dh_gsensor_driver);
}
/*----------------------------------------------------------------------------*/
module_init(lis3dh_init);
module_exit(lis3dh_exit);
/*----------------------------------------------------------------------------*/
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LIS3DH I2C driver");
MODULE_AUTHOR("Chunlei.Wang@mediatek.com");
/* End of lis3dh.c (repo: yevgeniy-logachev/CATB15Kernel, path: mediatek/custom/common/kernel/accelerometer/lis3dh_auto/lis3dh.c, license: GPL-2.0) */
/*
FILE
tTemplate source code of unxsISP.cgi
Built by mysqlRAD2.cgi (C) Gary Wallis and Hugo Urquiza 2001-2009
svn ID removed
PURPOSE
Schema dependent RAD generated file.
Application functionality belongs in ttemplatefunc.h while
RAD is still in use.
*/
#include "mysqlrad.h"
//Table Variables
//uTemplate: Primary Key
static unsigned uTemplate=0;
//cLabel: Short label
static char cLabel[33]={""};
//uTemplateSet: Short label
static unsigned uTemplateSet=0;
static char cuTemplateSetPullDown[256]={""};
//uTemplateType: Short label
static unsigned uTemplateType=0;
static char cuTemplateTypePullDown[256]={""};
//cComment: About the template
static char *cComment={""};
//cTemplate: Template itself
static char *cTemplate={""};
//uOwner: Record owner
static unsigned uOwner=0;
//uCreatedBy: uClient for last insert
static unsigned uCreatedBy=0;
#define ISM3FIELDS
//uCreatedDate: Unix seconds date last insert
static time_t uCreatedDate=0;
//uModBy: uClient for last update
static unsigned uModBy=0;
//uModDate: Unix seconds date last update
static time_t uModDate=0;
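//ISM3FIELDS enables the audit columns above (creator/modifier and Unix
//timestamps); when defined, inserts, updates and deletes also log the change
//via unxsISPLog() and deletes are restricted by uOwner/permission level.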
#define VAR_LIST_tTemplate "tTemplate.uTemplate,tTemplate.cLabel,tTemplate.uTemplateSet,tTemplate.uTemplateType,tTemplate.cComment,tTemplate.cTemplate,tTemplate.uOwner,tTemplate.uCreatedBy,tTemplate.uCreatedDate,tTemplate.uModBy,tTemplate.uModDate"
//Local only
void Insert_tTemplate(void);
void Update_tTemplate(char *cRowid);
void ProcesstTemplateListVars(pentry entries[], int x);
//In tTemplatefunc.h file included below
void ExtProcesstTemplateVars(pentry entries[], int x);
void ExttTemplateCommands(pentry entries[], int x);
void ExttTemplateButtons(void);
void ExttTemplateNavBar(void);
void ExttTemplateGetHook(entry gentries[], int x);
void ExttTemplateSelect(void);
void ExttTemplateSelectRow(void);
void ExttTemplateListSelect(void);
void ExttTemplateListFilter(void);
void ExttTemplateAuxTable(void);
#include "ttemplatefunc.h"
//Table Variables Assignment Function
void ProcesstTemplateVars(pentry entries[], int x)
{
register int i;
for(i=0;i<x;i++)
{
if(!strcmp(entries[i].name,"uTemplate"))
sscanf(entries[i].val,"%u",&uTemplate);
else if(!strcmp(entries[i].name,"cLabel"))
sprintf(cLabel,"%.32s",entries[i].val);
else if(!strcmp(entries[i].name,"uTemplateSet"))
sscanf(entries[i].val,"%u",&uTemplateSet);
else if(!strcmp(entries[i].name,"cuTemplateSetPullDown"))
{
sprintf(cuTemplateSetPullDown,"%.255s",entries[i].val);
uTemplateSet=ReadPullDown("tTemplateSet","cLabel",cuTemplateSetPullDown);
}
else if(!strcmp(entries[i].name,"uTemplateType"))
sscanf(entries[i].val,"%u",&uTemplateType);
else if(!strcmp(entries[i].name,"cuTemplateTypePullDown"))
{
sprintf(cuTemplateTypePullDown,"%.255s",entries[i].val);
uTemplateType=ReadPullDown("tTemplateType","cLabel",cuTemplateTypePullDown);
}
else if(!strcmp(entries[i].name,"cComment"))
cComment=entries[i].val;
else if(!strcmp(entries[i].name,"cTemplate"))
cTemplate=entries[i].val;
else if(!strcmp(entries[i].name,"uOwner"))
sscanf(entries[i].val,"%u",&uOwner);
else if(!strcmp(entries[i].name,"uCreatedBy"))
sscanf(entries[i].val,"%u",&uCreatedBy);
else if(!strcmp(entries[i].name,"uCreatedDate"))
sscanf(entries[i].val,"%lu",&uCreatedDate);
else if(!strcmp(entries[i].name,"uModBy"))
sscanf(entries[i].val,"%u",&uModBy);
else if(!strcmp(entries[i].name,"uModDate"))
sscanf(entries[i].val,"%lu",&uModDate);
}
//After so we can overwrite form data if needed.
ExtProcesstTemplateVars(entries,x);
}//ProcesstTemplateVars()
void ProcesstTemplateListVars(pentry entries[], int x)
{
register int i;
for(i=0;i<x;i++)
{
if(!strncmp(entries[i].name,"ED",2))
{
sscanf(entries[i].name+2,"%u",&uTemplate);
guMode=2002;
tTemplate("");
}
}
}//void ProcesstTemplateListVars(pentry entries[], int x)
int tTemplateCommands(pentry entries[], int x)
{
ProcessControlVars(entries,x);
ExttTemplateCommands(entries,x);
if(!strcmp(gcFunction,"tTemplateTools"))
{
if(!strcmp(gcFind,LANG_NB_LIST))
{
tTemplateList();
}
//Default
ProcesstTemplateVars(entries,x);
tTemplate("");
}
else if(!strcmp(gcFunction,"tTemplateList"))
{
ProcessControlVars(entries,x);
ProcesstTemplateListVars(entries,x);
tTemplateList();
}
return(0);
}//tTemplateCommands()
void tTemplate(const char *cResult)
{
MYSQL_RES *res;
MYSQL_RES *res2;
MYSQL_ROW field;
//Internal skip reloading
if(!cResult[0])
{
if(guMode)
ExttTemplateSelectRow();
else
ExttTemplateSelect();
mysql_query(&gMysql,gcQuery);
if(mysql_errno(&gMysql))
{
if(strstr(mysql_error(&gMysql)," doesn't exist"))
{
CreatetTemplate();
unxsISP("New tTemplate table created");
}
else
{
htmlPlainTextError(mysql_error(&gMysql));
}
}
res=mysql_store_result(&gMysql);
if((guI=mysql_num_rows(res)))
{
if(guMode==6)
{
sprintf(gcQuery,"SELECT _rowid FROM tTemplate WHERE uTemplate=%u"
,uTemplate);
mysql_query(&gMysql,gcQuery);
res2=mysql_store_result(&gMysql);
field=mysql_fetch_row(res2);
sscanf(field[0],"%lu",&gluRowid);
gluRowid++;
}
PageMachine("",0,"");
if(!guMode) mysql_data_seek(res,gluRowid-1);
field=mysql_fetch_row(res);
sscanf(field[0],"%u",&uTemplate);
sprintf(cLabel,"%.32s",field[1]);
sscanf(field[2],"%u",&uTemplateSet);
sscanf(field[3],"%u",&uTemplateType);
cComment=field[4];
cTemplate=field[5];
sscanf(field[6],"%u",&uOwner);
sscanf(field[7],"%u",&uCreatedBy);
sscanf(field[8],"%lu",&uCreatedDate);
sscanf(field[9],"%u",&uModBy);
sscanf(field[10],"%lu",&uModDate);
}
}//Internal Skip
Header_ism3(":: tTemplate",1);
printf("<table width=100%% cellspacing=0 cellpadding=0>\n");
printf("<tr><td colspan=2 align=right valign=center>");
printf("<input type=hidden name=gcFunction value=tTemplateTools>");
printf("<input type=hidden name=gluRowid value=%lu>",gluRowid);
if(guI)
{
if(guMode==6)
//printf(" Found");
printf(LANG_NBR_FOUND);
else if(guMode==5)
//printf(" Modified");
printf(LANG_NBR_MODIFIED);
else if(guMode==4)
//printf(" New");
printf(LANG_NBR_NEW);
printf(LANG_NBRF_SHOWING,gluRowid,guI);
}
else
{
if(!cResult[0])
//printf(" No records found");
printf(LANG_NBR_NORECS);
}
if(cResult[0]) printf("%s",cResult);
printf("</td></tr>");
printf("<tr><td valign=top width=25%%>");
ExttTemplateButtons();
printf("</td><td valign=top>");
//
OpenFieldSet("tTemplate Record Data",100);
if(guMode==2000 || guMode==2002)
tTemplateInput(1);
else
tTemplateInput(0);
//
CloseFieldSet();
//Bottom table
printf("<tr><td colspan=2>");
ExttTemplateAuxTable();
Footer_ism3();
}//end of tTemplate();
void tTemplateInput(unsigned uMode)
{
//uTemplate
OpenRow(LANG_FL_tTemplate_uTemplate,"black");
printf("<input title='%s' type=text name=uTemplate value=%u size=16 maxlength=10 "
,LANG_FT_tTemplate_uTemplate,uTemplate);
if(guPermLevel>=20 && uMode)
{
printf("></td></tr>\n");
}
else
{
printf("disabled></td></tr>\n");
printf("<input type=hidden name=uTemplate value=%u >\n",uTemplate);
}
//cLabel
OpenRow(LANG_FL_tTemplate_cLabel,"black");
printf("<input title='%s' type=text name=cLabel value=\"%s\" size=40 maxlength=32 "
,LANG_FT_tTemplate_cLabel,EncodeDoubleQuotes(cLabel));
if(guPermLevel>=7 && uMode)
{
printf("></td></tr>\n");
}
else
{
printf("disabled></td></tr>\n");
printf("<input type=hidden name=cLabel value=\"%s\">\n",EncodeDoubleQuotes(cLabel));
}
//uTemplateSet
OpenRow(LANG_FL_tTemplate_uTemplateSet,"black");
if(guPermLevel>=7 && uMode)
tTablePullDown("tTemplateSet;cuTemplateSetPullDown","cLabel","cLabel",uTemplateSet,1);
else
tTablePullDown("tTemplateSet;cuTemplateSetPullDown","cLabel","cLabel",uTemplateSet,0);
//uTemplateType
OpenRow(LANG_FL_tTemplate_uTemplateType,"black");
if(guPermLevel>=7 && uMode)
tTablePullDown("tTemplateType;cuTemplateTypePullDown","cLabel","cLabel",uTemplateType,1);
else
tTablePullDown("tTemplateType;cuTemplateTypePullDown","cLabel","cLabel",uTemplateType,0);
//cComment
OpenRow(LANG_FL_tTemplate_cComment,"black");
printf("<textarea title='%s' cols=80 wrap=hard rows=16 name=cComment "
,LANG_FT_tTemplate_cComment);
if(guPermLevel>=7 && uMode)
{
printf(">%s</textarea></td></tr>\n",cComment);
}
else
{
printf("disabled>%s</textarea></td></tr>\n",cComment);
printf("<input type=hidden name=cComment value=\"%s\" >\n",EncodeDoubleQuotes(cComment));
}
//cTemplate
OpenRow(LANG_FL_tTemplate_cTemplate,"black");
printf("<textarea title='%s' cols=80 wrap=off rows=16 name=cTemplate "
,LANG_FT_tTemplate_cTemplate);
if(guPermLevel>=7 && uMode)
{
printf(">%s</textarea></td></tr>\n",cTemplate);
}
else
{
printf("disabled>%s</textarea></td></tr>\n",cTemplate);
printf("<input type=hidden name=cTemplate value=\"%s\" >\n",EncodeDoubleQuotes(cTemplate));
}
//uOwner
OpenRow(LANG_FL_tTemplate_uOwner,"black");
if(guPermLevel>=20 && uMode)
{
printf("%s<input type=hidden name=uOwner value=%u >\n",ForeignKey(TCLIENT,"cLabel",uOwner),uOwner);
}
else
{
printf("%s<input type=hidden name=uOwner value=%u >\n",ForeignKey(TCLIENT,"cLabel",uOwner),uOwner);
}
//uCreatedBy
OpenRow(LANG_FL_tTemplate_uCreatedBy,"black");
if(guPermLevel>=20 && uMode)
{
printf("%s<input type=hidden name=uCreatedBy value=%u >\n",ForeignKey(TCLIENT,"cLabel",uCreatedBy),uCreatedBy);
}
else
{
printf("%s<input type=hidden name=uCreatedBy value=%u >\n",ForeignKey(TCLIENT,"cLabel",uCreatedBy),uCreatedBy);
}
//uCreatedDate
OpenRow(LANG_FL_tTemplate_uCreatedDate,"black");
if(uCreatedDate)
printf("%s\n\n",ctime(&uCreatedDate));
else
printf("---\n\n");
printf("<input type=hidden name=uCreatedDate value=%lu >\n",uCreatedDate);
//uModBy
OpenRow(LANG_FL_tTemplate_uModBy,"black");
if(guPermLevel>=20 && uMode)
{
printf("%s<input type=hidden name=uModBy value=%u >\n",ForeignKey(TCLIENT,"cLabel",uModBy),uModBy);
}
else
{
printf("%s<input type=hidden name=uModBy value=%u >\n",ForeignKey(TCLIENT,"cLabel",uModBy),uModBy);
}
//uModDate
OpenRow(LANG_FL_tTemplate_uModDate,"black");
if(uModDate)
printf("%s\n\n",ctime(&uModDate));
else
printf("---\n\n");
printf("<input type=hidden name=uModDate value=%lu >\n",uModDate);
printf("</tr>\n");
}//void tTemplateInput(unsigned uMode)
void NewtTemplate(unsigned uMode)
{
register int i=0;
MYSQL_RES *res;
sprintf(gcQuery,"SELECT uTemplate FROM tTemplate\
WHERE uTemplate=%u"
,uTemplate);
mysql_query(&gMysql,gcQuery);
if(mysql_errno(&gMysql)) htmlPlainTextError(mysql_error(&gMysql));
res=mysql_store_result(&gMysql);
i=mysql_num_rows(res);
if(i)
//tTemplate("<blink>Record already exists");
tTemplate(LANG_NBR_RECEXISTS);
//insert query
Insert_tTemplate();
if(mysql_errno(&gMysql)) htmlPlainTextError(mysql_error(&gMysql));
//sprintf(gcQuery,"New record %u added");
uTemplate=mysql_insert_id(&gMysql);
#ifdef ISM3FIELDS
uCreatedDate=luGetCreatedDate("tTemplate",uTemplate);
unxsISPLog(uTemplate,"tTemplate","New");
#endif
if(!uMode)
{
sprintf(gcQuery,LANG_NBR_NEWRECADDED,uTemplate);
tTemplate(gcQuery);
}
}//NewtTemplate(unsigned uMode)
void DeletetTemplate(void)
{
#ifdef ISM3FIELDS
sprintf(gcQuery,"DELETE FROM tTemplate WHERE uTemplate=%u AND ( uOwner=%u OR %u>9 )"
,uTemplate,guLoginClient,guPermLevel);
#else
sprintf(gcQuery,"DELETE FROM tTemplate WHERE uTemplate=%u"
,uTemplate);
#endif
mysql_query(&gMysql,gcQuery);
if(mysql_errno(&gMysql)) htmlPlainTextError(mysql_error(&gMysql));
//tTemplate("Record Deleted");
if(mysql_affected_rows(&gMysql)>0)
{
#ifdef ISM3FIELDS
unxsISPLog(uTemplate,"tTemplate","Del");
#endif
tTemplate(LANG_NBR_RECDELETED);
}
else
{
#ifdef ISM3FIELDS
unxsISPLog(uTemplate,"tTemplate","DelError");
#endif
tTemplate(LANG_NBR_RECNOTDELETED);
}
}//void DeletetTemplate(void)
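//Insert_tTemplate: build and run the INSERT statement; text fields pass through
//TextAreaSave() for escaping and uCreatedDate is set server side via UNIX_TIMESTAMP(NOW()).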
void Insert_tTemplate(void)
{
//insert query
sprintf(gcQuery,"INSERT INTO tTemplate SET uTemplate=%u,cLabel='%s',uTemplateSet=%u,uTemplateType=%u,cComment='%s',cTemplate='%s',uOwner=%u,uCreatedBy=%u,uCreatedDate=UNIX_TIMESTAMP(NOW())",
uTemplate
,TextAreaSave(cLabel)
,uTemplateSet
,uTemplateType
,TextAreaSave(cComment)
,TextAreaSave(cTemplate)
,uOwner
,uCreatedBy
);
mysql_query(&gMysql,gcQuery);
}//void Insert_tTemplate(void)
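//Update_tTemplate: UPDATE the row addressed by _rowid; text fields again pass through
//TextAreaSave() and uModDate is refreshed server side via UNIX_TIMESTAMP(NOW()).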
void Update_tTemplate(char *cRowid)
{
//update query
sprintf(gcQuery,"UPDATE tTemplate SET uTemplate=%u,cLabel='%s',uTemplateSet=%u,uTemplateType=%u,cComment='%s',cTemplate='%s',uModBy=%u,uModDate=UNIX_TIMESTAMP(NOW()) WHERE _rowid=%s",
uTemplate
,TextAreaSave(cLabel)
,uTemplateSet
,uTemplateType
,TextAreaSave(cComment)
,TextAreaSave(cTemplate)
,uModBy
,cRowid);
mysql_query(&gMysql,gcQuery);
}//void Update_tTemplate(void)
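//ModtTemplate: re-read the row and, with ISM3FIELDS, compare the stored uModDate against
//the submitted value to detect an external modification before running Update_tTemplate().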
void ModtTemplate(void)
{
register int i=0;
MYSQL_RES *res;
MYSQL_ROW field;
#ifdef ISM3FIELDS
unsigned uPreModDate=0;
sprintf(gcQuery,"SELECT uTemplate,uModDate FROM tTemplate WHERE uTemplate=%u"
,uTemplate);
#else
sprintf(gcQuery,"SELECT uTemplate FROM tTemplate\
WHERE uTemplate=%u"
,uTemplate);
#endif
mysql_query(&gMysql,gcQuery);
if(mysql_errno(&gMysql)) htmlPlainTextError(mysql_error(&gMysql));
res=mysql_store_result(&gMysql);
i=mysql_num_rows(res);
//if(i<1) tTemplate("<blink>Record does not exist");
if(i<1) tTemplate(LANG_NBR_RECNOTEXIST);
//if(i>1) tTemplate("<blink>Multiple rows!");
if(i>1) tTemplate(LANG_NBR_MULTRECS);
field=mysql_fetch_row(res);
#ifdef ISM3FIELDS
sscanf(field[1],"%u",&uPreModDate);
if(uPreModDate!=uModDate) tTemplate(LANG_NBR_EXTMOD);
#endif
Update_tTemplate(field[0]);
if(mysql_errno(&gMysql)) htmlPlainTextError(mysql_error(&gMysql));
//sprintf(query,"record %s modified",field[0]);
sprintf(gcQuery,LANG_NBRF_REC_MODIFIED,field[0]);
#ifdef ISM3FIELDS
uModDate=luGetModDate("tTemplate",uTemplate);
unxsISPLog(uTemplate,"tTemplate","Mod");
#endif
tTemplate(gcQuery);
}//ModtTemplate(void)
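//tTemplateList: run the externally defined list select, page through the result set
//with PageMachine() and render one table row per record with an Edit button.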
void tTemplateList(void)
{
MYSQL_RES *res;
MYSQL_ROW field;
ExttTemplateListSelect();
mysql_query(&gMysql,gcQuery);
if(mysql_error(&gMysql)[0]) htmlPlainTextError(mysql_error(&gMysql));
res=mysql_store_result(&gMysql);
guI=mysql_num_rows(res);
PageMachine("tTemplateList",1,"");//1 is auto header list guMode. Opens table!
//Filter select drop down
ExttTemplateListFilter();
printf("<input type=text size=16 name=gcCommand maxlength=98 value=\"%s\" >",gcCommand);
printf("</table>\n");
printf("<table bgcolor=#9BC1B3 border=0 width=100%%>\n");
printf("<tr bgcolor=black><td><font face=arial,helvetica color=white>uTemplate<td><font face=arial,helvetica color=white>cLabel<td><font face=arial,helvetica color=white>uTemplateSet<td><font face=arial,helvetica color=white>uTemplateType<td><font face=arial,helvetica color=white>cComment<td><font face=arial,helvetica color=white>cTemplate<td><font face=arial,helvetica color=white>uOwner<td><font face=arial,helvetica color=white>uCreatedBy<td><font face=arial,helvetica color=white>uCreatedDate<td><font face=arial,helvetica color=white>uModBy<td><font face=arial,helvetica color=white>uModDate</tr>");
mysql_data_seek(res,guStart-1);
for(guN=0;guN<(guEnd-guStart+1);guN++)
{
field=mysql_fetch_row(res);
if(!field)
{
printf("<tr><td><font face=arial,helvetica>End of data</table>");
Footer_ism3();
}
if(guN % 2)
printf("<tr bgcolor=#BBE1D3>");
else
printf("<tr>");
time_t luTime8=strtoul(field[8],NULL,10);
char cBuf8[32];
if(luTime8)
ctime_r(&luTime8,cBuf8);
else
sprintf(cBuf8,"---");
time_t luTime10=strtoul(field[10],NULL,10);
char cBuf10[32];
if(luTime10)
ctime_r(&luTime10,cBuf10);
else
sprintf(cBuf10,"---");
printf("<td><input type=submit name=ED%s value=Edit> %s<td>%s<td>%s<td>%s<td><textarea disabled>%s</textarea><td><textarea disabled>%s</textarea><td>%s<td>%s<td>%s<td>%s<td>%s</tr>"
,field[0]
,field[0]
,field[1]
,ForeignKey("tTemplateSet","cLabel",strtoul(field[2],NULL,10))
,ForeignKey("tTemplateType","cLabel",strtoul(field[3],NULL,10))
,field[4]
,field[5]
,ForeignKey(TCLIENT,"cLabel",strtoul(field[6],NULL,10))
,ForeignKey(TCLIENT,"cLabel",strtoul(field[7],NULL,10))
,cBuf8
,ForeignKey(TCLIENT,"cLabel",strtoul(field[9],NULL,10))
,cBuf10
);
}
printf("</table></form>\n");
Footer_ism3();
}//tTemplateList()
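//CreatetTemplate: create the backing MySQL table on first use (CREATE TABLE IF NOT EXISTS).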
void CreatetTemplate(void)
{
sprintf(gcQuery,"CREATE TABLE IF NOT EXISTS tTemplate ( uTemplate INT UNSIGNED PRIMARY KEY AUTO_INCREMENT, cLabel VARCHAR(32) NOT NULL DEFAULT '', uOwner INT UNSIGNED NOT NULL DEFAULT 0,index (uOwner), uCreatedBy INT UNSIGNED NOT NULL DEFAULT 0, uCreatedDate INT UNSIGNED NOT NULL DEFAULT 0, uModBy INT UNSIGNED NOT NULL DEFAULT 0, uModDate INT UNSIGNED NOT NULL DEFAULT 0, cComment TEXT NOT NULL DEFAULT '', cTemplate TEXT NOT NULL DEFAULT '', uTemplateSet INT UNSIGNED NOT NULL DEFAULT 0, uTemplateType INT UNSIGNED NOT NULL DEFAULT 0 )");
mysql_query(&gMysql,gcQuery);
if(mysql_errno(&gMysql))
htmlPlainTextError(mysql_error(&gMysql));
}//CreatetTemplate()
| unxs0/unxsVZ | unxsISP/ttemplate.c | C | gpl-2.0 | 17,292 |
/*
* CUnit - A Unit testing framework library for C.
* Copyright (C) 2004-2006 Jerry St.Clair
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Support for unit tests of CUnit framework
*
* 12-Aug-2004 Initial implementation. (JDS)
*
* 02-May-2006 Added internationalization hooks. (JDS)
*/
/** @file
 * CUnit internal testing functions (implementation).
*/
/** @addtogroup Internal
@{
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "../../Headers/CUnit.h"
#include "../../Headers/MyMem.h"
#include "../../Headers/Util.h"
#include "../../Headers/CUnit_intl.h"
#include "test_cunit.h"
static unsigned int f_nTests = 0;
static unsigned int f_nFailures = 0;
static unsigned int f_nTests_stored = 0;
static unsigned int f_nFails_stored = 0;
static clock_t f_start_time;
static void test_cunit_initialize(void);
static void test_cunit_report_results(void);
int main()
{
/* No line buffering. */
setbuf(stdout, NULL);
test_cunit_initialize();
fprintf(stdout, "\n%s", _("Testing CUnit internals..."));
/* individual module test functions go here */
test_cunit_CUError();
test_cunit_MyMem();
test_cunit_TestDB();
test_cunit_TestRun();
test_cunit_Util();
test_cunit_report_results();
CU_cleanup_registry();
return 0;
}
void test_cunit_start_tests(const char* strName)
{
fprintf(stdout, _("\n testing %s ... "), strName);
f_nTests_stored = f_nTests;
f_nFails_stored = f_nFailures;
}
void test_cunit_end_tests(void)
{
fprintf(stdout, _("%d assertions, %d failures"),
f_nTests - f_nTests_stored,
f_nFailures - f_nFails_stored);
}
void test_cunit_add_test(void)
{
++f_nTests;
}
void test_cunit_add_failure(void)
{
++f_nFailures;
}
unsigned int test_cunit_test_count(void)
{
return f_nTests;
}
unsigned int test_cunit_failure_count(void)
{
return f_nFailures;
}
void test_cunit_initialize(void)
{
f_nTests = 0;
f_nFailures = 0;
f_start_time = clock();
}
void test_cunit_report_results(void)
{
fprintf(stdout,
"\n\n---------------------------"
"\n%s"
"\n---------------------------"
"\n %s%d"
"\n %s%d"
"\n %s%d"
"\n\n%s%8.3f%s\n",
_("CUnit Internal Test Results"),
_("Total Number of Assertions: "),
f_nTests,
_("Successes: "),
f_nTests-f_nFailures,
_("Failures: "),
f_nFailures,
_("Total test time = "),
((double)clock() - (double)f_start_time)/(double)CLOCKS_PER_SEC,
_(" seconds."));
}
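/* Assertion helper behind the internal test macros: every call counts as an
 * assertion, and a failure is recorded and reported when value is CU_FALSE.
 * The condition, file and line arguments are presumably supplied by a
 * stringizing wrapper macro in test_cunit.h (an assumption; that macro is not
 * shown in this file). */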
CU_BOOL test_cunit_assert_impl(CU_BOOL value,
const char* condition,
const char* file,
unsigned int line)
{
test_cunit_add_test();
if (CU_FALSE == value) {
test_cunit_add_failure();
printf(_("\nTEST FAILED: File '%s', Line %d, Condition '%s.'\n"),
file, line, condition);
}
return value;
}
| zxgyy/QiNiuBatchUpload | BatchupLoad/qiniu/CUnit/CUnit/Sources/Test/test_cunit.c | C | gpl-2.0 | 3,772 |
/*
Copyright (c) 2004 The Regents of the University of Michigan.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <dirent.h>
#include "gssd.h"
#include "err_util.h"
#include "nfslib.h"
extern struct pollfd *pollarray;
extern unsigned long pollsize;
#define POLL_MILLISECS 500
static volatile int dir_changed = 1;
static void dir_notify_handler(__attribute__((unused))int sig)
{
dir_changed = 1;
}
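/* Walk the client list and service every pipe whose pollfd fired: POLLHUP marks
 * the pipe for closing on the next directory rescan, POLLIN dispatches the
 * matching upcall handler. ret counts down so the loop stops once all flagged
 * descriptors have been handled. */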
static void
scan_poll_results(int ret)
{
int i;
struct clnt_info *clp;
for (clp = clnt_list.tqh_first; clp != NULL; clp = clp->list.tqe_next)
{
i = clp->gssd_poll_index;
if (i >= 0 && pollarray[i].revents) {
if (pollarray[i].revents & POLLHUP) {
clp->gssd_close_me = 1;
dir_changed = 1;
}
if (pollarray[i].revents & POLLIN)
handle_gssd_upcall(clp);
pollarray[clp->gssd_poll_index].revents = 0;
ret--;
if (!ret)
break;
}
i = clp->krb5_poll_index;
if (i >= 0 && pollarray[i].revents) {
if (pollarray[i].revents & POLLHUP) {
clp->krb5_close_me = 1;
dir_changed = 1;
}
if (pollarray[i].revents & POLLIN)
handle_krb5_upcall(clp);
pollarray[clp->krb5_poll_index].revents = 0;
ret--;
if (!ret)
break;
}
}
}
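/* Open one top-level rpc_pipefs directory, arm dnotify on it (DNOTIFY_SIGNAL on
 * create/delete/modify, multishot) and add it to topdirs_list. */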
static int
topdirs_add_entry(struct dirent *dent)
{
struct topdirs_info *tdi;
tdi = calloc(sizeof(struct topdirs_info), 1);
if (tdi == NULL) {
printerr(0, "ERROR: Couldn't allocate struct topdirs_info\n");
return -1;
}
tdi->dirname = malloc(PATH_MAX);
if (tdi->dirname == NULL) {
printerr(0, "ERROR: Couldn't allocate directory name\n");
free(tdi);
return -1;
}
snprintf(tdi->dirname, PATH_MAX, "%s/%s", pipefs_dir, dent->d_name);
tdi->fd = open(tdi->dirname, O_RDONLY);
if (tdi->fd == -1) {
printerr(0, "ERROR: failed to open %s\n", tdi->dirname);
		free(tdi->dirname);
		free(tdi);
return -1;
}
fcntl(tdi->fd, F_SETSIG, DNOTIFY_SIGNAL);
fcntl(tdi->fd, F_NOTIFY, DN_CREATE|DN_DELETE|DN_MODIFY|DN_MULTISHOT);
TAILQ_INSERT_HEAD(&topdirs_list, tdi, list);
return 0;
}
static void
topdirs_free_list(void)
{
struct topdirs_info *tdi;
	/* pop entries one at a time; removing the current node inside
	 * TAILQ_FOREACH would make the loop advance through freed memory */
	while ((tdi = TAILQ_FIRST(&topdirs_list)) != NULL) {
		TAILQ_REMOVE(&topdirs_list, tdi, list);
		free(tdi->dirname);
		if (tdi->fd != -1)
			close(tdi->fd);
		free(tdi);
	}
}
static int
topdirs_init_list(void)
{
DIR *pipedir;
struct dirent *dent;
int ret;
TAILQ_INIT(&topdirs_list);
pipedir = opendir(pipefs_dir);
if (pipedir == NULL) {
printerr(0, "ERROR: could not open rpc_pipefs directory '%s': "
"%s\n", pipefs_dir, strerror(errno));
return -1;
}
for (dent = readdir(pipedir); dent != NULL; dent = readdir(pipedir)) {
if (dent->d_type != DT_DIR ||
strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0) {
continue;
}
ret = topdirs_add_entry(dent);
if (ret)
goto out_err;
}
closedir(pipedir);
return 0;
out_err:
topdirs_free_list();
return -1;
}
#ifdef HAVE_PPOLL
static void gssd_poll(struct pollfd *fds, unsigned long nfds)
{
sigset_t emptyset;
int ret;
sigemptyset(&emptyset);
ret = ppoll(fds, nfds, NULL, &emptyset);
if (ret < 0) {
if (errno != EINTR)
printerr(0, "WARNING: error return from poll\n");
} else if (ret == 0) {
printerr(0, "WARNING: unexpected timeout\n");
} else {
scan_poll_results(ret);
}
}
#else /* !HAVE_PPOLL */
static void gssd_poll(struct pollfd *fds, unsigned long nfds)
{
int ret;
/* race condition here: dir_changed could be set before we
* enter the poll, and we'd never notice if it weren't for the
* timeout. */
ret = poll(fds, nfds, POLL_MILLISECS);
if (ret < 0) {
if (errno != EINTR)
printerr(0, "WARNING: error return from poll\n");
} else if (ret == 0) {
/* timeout */
} else { /* ret > 0 */
scan_poll_results(ret);
}
}
#endif /* !HAVE_PPOLL */
void
gssd_run()
{
struct sigaction dn_act = {
.sa_handler = dir_notify_handler
};
sigset_t set;
sigemptyset(&dn_act.sa_mask);
sigaction(DNOTIFY_SIGNAL, &dn_act, NULL);
/* just in case the signal is blocked... */
sigemptyset(&set);
sigaddset(&set, DNOTIFY_SIGNAL);
sigprocmask(SIG_UNBLOCK, &set, NULL);
if (topdirs_init_list() != 0)
return;
init_client_list();
printerr(1, "beginning poll\n");
while (1) {
while (dir_changed) {
dir_changed = 0;
if (update_client_list()) {
/* Error msg is already printed */
exit(1);
}
/* release the parent after the initial dir scan */
release_parent(pipefds);
}
gssd_poll(pollarray, pollsize);
}
topdirs_free_list();
return;
}
| greearb/nfs-utils-ct | utils/gssd/gssd_main_loop.c | C | gpl-2.0 | 6,223 |
/*
* Universal power supply monitor class
*
 * Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
 * Copyright © 2004 Szabolcs Gyurko
 * Copyright © 2003 Ian Molton <spyro@f2s.com>
*
* Modified: 2004, Oct Szabolcs Gyurko
*
* You may use this code as per GPL version 2
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/power_supply.h>
#include <linux/thermal.h>
#include "power_supply.h"
/* exported for the APM Power driver, APM emulation */
struct class *power_supply_class;
EXPORT_SYMBOL_GPL(power_supply_class);
ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
EXPORT_SYMBOL_GPL(power_supply_notifier);
static struct device_type power_supply_dev_type;
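/*
 * Returns true when "supplier" feeds "supply"; the relationship can be declared
 * either through the consumer's supplied_from list or the supplier's
 * supplied_to list, and both directions are checked here.
 */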
static bool __power_supply_is_supplied_by(struct power_supply *supplier,
struct power_supply *supply)
{
int i;
if (!supply->supplied_from && !supplier->supplied_to)
return false;
/* Support both supplied_to and supplied_from modes */
if (supply->supplied_from) {
if (!supplier->name)
return false;
for (i = 0; i < supply->num_supplies; i++)
if (!strcmp(supplier->name, supply->supplied_from[i]))
return true;
} else {
if (!supply->name)
return false;
for (i = 0; i < supplier->num_supplicants; i++)
if (!strcmp(supplier->supplied_to[i], supply->name))
return true;
}
return false;
}
/**
* power_supply_set_current_limit - set current limit
* @psy: the power supply to control
* @limit: current limit in uA from the power supply.
* 0 will disable the power supply.
*
* This function will set a maximum supply current from a source
* and it will disable the charger when limit is 0.
*/
int power_supply_set_current_limit(struct power_supply *psy, int limit)
{
const union power_supply_propval ret = {limit,};
if (psy->set_property)
return psy->set_property(psy, POWER_SUPPLY_PROP_CURRENT_MAX,
&ret);
return -ENXIO;
}
EXPORT_SYMBOL_GPL(power_supply_set_current_limit);
/**
* power_supply_set_charging_enabled - enable or disable charging
* @psy: the power supply to control
* @enable: sets enable property of power supply
*/
int power_supply_set_charging_enabled(struct power_supply *psy, bool enable)
{
const union power_supply_propval ret = {enable,};
if (psy->set_property)
return psy->set_property(psy,
POWER_SUPPLY_PROP_CHARGING_ENABLED,
&ret);
return -ENXIO;
}
EXPORT_SYMBOL_GPL(power_supply_set_charging_enabled);
/**
* power_supply_set_present - set present state of the power supply
* @psy: the power supply to control
* @enable: sets present property of power supply
*/
int power_supply_set_present(struct power_supply *psy, bool enable)
{
const union power_supply_propval ret = {enable,};
if (psy->set_property)
return psy->set_property(psy, POWER_SUPPLY_PROP_PRESENT,
&ret);
return -ENXIO;
}
EXPORT_SYMBOL_GPL(power_supply_set_present);
/**
* power_supply_set_online - set online state of the power supply
* @psy: the power supply to control
* @enable: sets online property of power supply
*/
int power_supply_set_online(struct power_supply *psy, bool enable)
{
const union power_supply_propval ret = {enable,};
if (psy->set_property)
return psy->set_property(psy, POWER_SUPPLY_PROP_ONLINE,
&ret);
return -ENXIO;
}
EXPORT_SYMBOL_GPL(power_supply_set_online);
/**
 * power_supply_set_health_state - set health state of the power supply
* @psy: the power supply to control
* @health: sets health property of power supply
*/
int power_supply_set_health_state(struct power_supply *psy, int health)
{
const union power_supply_propval ret = {health,};
if (psy->set_property)
return psy->set_property(psy, POWER_SUPPLY_PROP_HEALTH,
&ret);
return -ENXIO;
}
EXPORT_SYMBOL(power_supply_set_health_state);
/**
* power_supply_set_scope - set scope of the power supply
* @psy: the power supply to control
* @scope: value to set the scope property to, should be from
* the SCOPE enum in power_supply.h
*/
int power_supply_set_scope(struct power_supply *psy, int scope)
{
const union power_supply_propval ret = {scope, };
if (psy->set_property)
return psy->set_property(psy, POWER_SUPPLY_PROP_SCOPE,
&ret);
return -ENXIO;
}
EXPORT_SYMBOL_GPL(power_supply_set_scope);
/**
* power_supply_set_supply_type - set type of the power supply
* @psy: the power supply to control
* @supply_type: sets type property of power supply
*/
int power_supply_set_supply_type(struct power_supply *psy,
enum power_supply_type supply_type)
{
const union power_supply_propval ret = {supply_type,};
if (psy->set_property)
return psy->set_property(psy, POWER_SUPPLY_PROP_TYPE,
&ret);
return -ENXIO;
}
EXPORT_SYMBOL_GPL(power_supply_set_supply_type);
/**
* power_supply_set_charge_type - set charge type of the power supply
* @psy: the power supply to control
 * @charge_type: sets charge type property of power supply
*/
int power_supply_set_charge_type(struct power_supply *psy, int charge_type)
{
const union power_supply_propval ret = {charge_type,};
if (psy->set_property)
return psy->set_property(psy, POWER_SUPPLY_PROP_CHARGE_TYPE,
&ret);
return -ENXIO;
}
EXPORT_SYMBOL_GPL(power_supply_set_charge_type);
static int __power_supply_changed_work(struct device *dev, void *data)
{
struct power_supply *psy = (struct power_supply *)data;
struct power_supply *pst = dev_get_drvdata(dev);
if (__power_supply_is_supplied_by(psy, pst)) {
if (pst->external_power_changed)
pst->external_power_changed(pst);
}
return 0;
}
static void power_supply_changed_work(struct work_struct *work)
{
unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
dev_dbg(psy->dev, "%s\n", __func__);
spin_lock_irqsave(&psy->changed_lock, flags);
if (psy->changed) {
psy->changed = false;
spin_unlock_irqrestore(&psy->changed_lock, flags);
class_for_each_device(power_supply_class, NULL, psy,
__power_supply_changed_work);
power_supply_update_leds(psy);
atomic_notifier_call_chain(&power_supply_notifier,
PSY_EVENT_PROP_CHANGED, psy);
kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
spin_lock_irqsave(&psy->changed_lock, flags);
}
/*
* Dependent power supplies (e.g. battery) may have changed state
* as a result of this event, so poll again and hold the
* wakeup_source until all events are processed.
*/
if (!psy->changed)
pm_relax(psy->dev);
spin_unlock_irqrestore(&psy->changed_lock, flags);
}
void power_supply_changed(struct power_supply *psy)
{
unsigned long flags;
dev_dbg(psy->dev, "%s\n", __func__);
spin_lock_irqsave(&psy->changed_lock, flags);
psy->changed = true;
pm_stay_awake(psy->dev);
spin_unlock_irqrestore(&psy->changed_lock, flags);
schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
#ifdef CONFIG_OF
#include <linux/of.h>
static int __power_supply_populate_supplied_from(struct device *dev,
void *data)
{
struct power_supply *psy = (struct power_supply *)data;
struct power_supply *epsy = dev_get_drvdata(dev);
struct device_node *np;
int i = 0;
do {
np = of_parse_phandle(psy->of_node, "power-supplies", i++);
if (!np)
continue;
if (np == epsy->of_node) {
dev_info(psy->dev, "%s: Found supply : %s\n",
psy->name, epsy->name);
psy->supplied_from[i-1] = (char *)epsy->name;
psy->num_supplies++;
of_node_put(np);
break;
}
of_node_put(np);
} while (np);
return 0;
}
static int power_supply_populate_supplied_from(struct power_supply *psy)
{
int error;
error = class_for_each_device(power_supply_class, NULL, psy,
__power_supply_populate_supplied_from);
dev_dbg(psy->dev, "%s %d\n", __func__, error);
return error;
}
static int __power_supply_find_supply_from_node(struct device *dev,
void *data)
{
struct device_node *np = (struct device_node *)data;
struct power_supply *epsy = dev_get_drvdata(dev);
/* return error breaks out of class_for_each_device loop */
if (epsy->of_node == np)
return -EINVAL;
return 0;
}
static int power_supply_find_supply_from_node(struct device_node *supply_node)
{
int error;
struct device *dev;
struct class_dev_iter iter;
/*
* Use iterator to see if any other device is registered.
* This is required since class_for_each_device returns 0
* if there are no devices registered.
*/
class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
dev = class_dev_iter_next(&iter);
if (!dev)
return -EPROBE_DEFER;
/*
* We have to treat the return value as inverted, because if
* we return error on not found, then it won't continue looking.
* So we trick it by returning error on success to stop looking
* once the matching device is found.
*/
error = class_for_each_device(power_supply_class, NULL, supply_node,
__power_supply_find_supply_from_node);
return error ? 0 : -EPROBE_DEFER;
}
static int power_supply_check_supplies(struct power_supply *psy)
{
struct device_node *np;
int cnt = 0;
/* If there is already a list honor it */
if (psy->supplied_from && psy->num_supplies > 0)
return 0;
/* No device node found, nothing to do */
if (!psy->of_node)
return 0;
do {
int ret;
np = of_parse_phandle(psy->of_node, "power-supplies", cnt++);
if (!np)
continue;
ret = power_supply_find_supply_from_node(np);
if (ret) {
dev_dbg(psy->dev, "Failed to find supply, defer!\n");
of_node_put(np);
return -EPROBE_DEFER;
}
of_node_put(np);
} while (np);
/* All supplies found, allocate char ** array for filling */
psy->supplied_from = devm_kzalloc(psy->dev, sizeof(psy->supplied_from),
GFP_KERNEL);
if (!psy->supplied_from) {
dev_err(psy->dev, "Couldn't allocate memory for supply list\n");
return -ENOMEM;
}
*psy->supplied_from = devm_kzalloc(psy->dev, sizeof(char *) * cnt,
GFP_KERNEL);
if (!*psy->supplied_from) {
dev_err(psy->dev, "Couldn't allocate memory for supply list\n");
return -ENOMEM;
}
return power_supply_populate_supplied_from(psy);
}
#else
static inline int power_supply_check_supplies(struct power_supply *psy)
{
return 0;
}
#endif
static int __power_supply_am_i_supplied(struct device *dev, void *data)
{
union power_supply_propval ret = {0,};
struct power_supply *psy = (struct power_supply *)data;
struct power_supply *epsy = dev_get_drvdata(dev);
if (__power_supply_is_supplied_by(epsy, psy))
if (!epsy->get_property(epsy, POWER_SUPPLY_PROP_ONLINE, &ret)) {
if (ret.intval)
return ret.intval;
}
return 0;
}
int power_supply_am_i_supplied(struct power_supply *psy)
{
int error;
error = class_for_each_device(power_supply_class, NULL, psy,
__power_supply_am_i_supplied);
dev_dbg(psy->dev, "%s %d\n", __func__, error);
return error;
}
EXPORT_SYMBOL_GPL(power_supply_am_i_supplied);
static int __power_supply_is_system_supplied(struct device *dev, void *data)
{
union power_supply_propval ret = {0,};
struct power_supply *psy = dev_get_drvdata(dev);
unsigned int *count = data;
(*count)++;
if (psy->type != POWER_SUPPLY_TYPE_BATTERY) {
if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &ret))
return 0;
if (ret.intval)
return ret.intval;
}
return 0;
}
int power_supply_is_system_supplied(void)
{
int error;
unsigned int count = 0;
error = class_for_each_device(power_supply_class, NULL, &count,
__power_supply_is_system_supplied);
/*
* If no power class device was found at all, most probably we are
* running on a desktop system, so assume we are on mains power.
*/
if (count == 0)
return 1;
return error;
}
EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
int power_supply_set_battery_charged(struct power_supply *psy)
{
if (psy->type == POWER_SUPPLY_TYPE_BATTERY && psy->set_charged) {
psy->set_charged(psy);
return 0;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(power_supply_set_battery_charged);
static int power_supply_match_device_by_name(struct device *dev, const void *data)
{
const char *name = data;
struct power_supply *psy = dev_get_drvdata(dev);
return strcmp(psy->name, name) == 0;
}
struct power_supply *power_supply_get_by_name(const char *name)
{
struct device *dev = class_find_device(power_supply_class, NULL, name,
power_supply_match_device_by_name);
return dev ? dev_get_drvdata(dev) : NULL;
}
EXPORT_SYMBOL_GPL(power_supply_get_by_name);
#ifdef CONFIG_OF
static int power_supply_match_device_node(struct device *dev, const void *data)
{
return dev->parent && dev->parent->of_node == data;
}
struct power_supply *power_supply_get_by_phandle(struct device_node *np,
const char *property)
{
struct device_node *power_supply_np;
struct device *dev;
power_supply_np = of_parse_phandle(np, property, 0);
if (!power_supply_np)
return ERR_PTR(-ENODEV);
dev = class_find_device(power_supply_class, NULL, power_supply_np,
power_supply_match_device_node);
of_node_put(power_supply_np);
return dev ? dev_get_drvdata(dev) : NULL;
}
EXPORT_SYMBOL_GPL(power_supply_get_by_phandle);
#endif /* CONFIG_OF */
int power_supply_powers(struct power_supply *psy, struct device *dev)
{
return sysfs_create_link(&psy->dev->kobj, &dev->kobj, "powers");
}
EXPORT_SYMBOL_GPL(power_supply_powers);
static void power_supply_dev_release(struct device *dev)
{
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
kfree(dev);
}
int power_supply_reg_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&power_supply_notifier, nb);
}
EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
void power_supply_unreg_notifier(struct notifier_block *nb)
{
atomic_notifier_chain_unregister(&power_supply_notifier, nb);
}
EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
#ifdef CONFIG_THERMAL
static int power_supply_read_temp(struct thermal_zone_device *tzd,
unsigned long *temp)
{
struct power_supply *psy;
union power_supply_propval val;
int ret;
WARN_ON(tzd == NULL);
psy = tzd->devdata;
ret = psy->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
/* Convert tenths of degree Celsius to milli degree Celsius. */
if (!ret)
*temp = val.intval * 100;
return ret;
}
static struct thermal_zone_device_ops psy_tzd_ops = {
.get_temp = power_supply_read_temp,
};
static int psy_register_thermal(struct power_supply *psy)
{
int i;
	/* Register a thermal zone device if psy reports temperature */
for (i = 0; i < psy->num_properties; i++) {
if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) {
psy->tzd = thermal_zone_device_register(psy->name, 0, 0,
psy, &psy_tzd_ops, NULL, 0, 0);
if (IS_ERR(psy->tzd))
return PTR_ERR(psy->tzd);
break;
}
}
return 0;
}
static void psy_unregister_thermal(struct power_supply *psy)
{
if (IS_ERR_OR_NULL(psy->tzd))
return;
thermal_zone_device_unregister(psy->tzd);
}
/* thermal cooling device callbacks */
static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
unsigned long *state)
{
struct power_supply *psy;
union power_supply_propval val;
int ret;
psy = tcd->devdata;
ret = psy->get_property(psy,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
if (!ret)
*state = val.intval;
return ret;
}
static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
unsigned long *state)
{
struct power_supply *psy;
union power_supply_propval val;
int ret;
psy = tcd->devdata;
ret = psy->get_property(psy,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
if (!ret)
*state = val.intval;
return ret;
}
static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
unsigned long state)
{
struct power_supply *psy;
union power_supply_propval val;
int ret;
psy = tcd->devdata;
val.intval = state;
ret = psy->set_property(psy,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
return ret;
}
static struct thermal_cooling_device_ops psy_tcd_ops = {
.get_max_state = ps_get_max_charge_cntl_limit,
	.get_cur_state = ps_get_cur_charge_cntl_limit,
.set_cur_state = ps_set_cur_charge_cntl_limit,
};
static int psy_register_cooler(struct power_supply *psy)
{
int i;
/* Register for cooling device if psy can control charging */
for (i = 0; i < psy->num_properties; i++) {
if (psy->properties[i] ==
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT) {
psy->tcd = thermal_cooling_device_register(
(char *)psy->name,
psy, &psy_tcd_ops);
if (IS_ERR(psy->tcd))
return PTR_ERR(psy->tcd);
break;
}
}
return 0;
}
static void psy_unregister_cooler(struct power_supply *psy)
{
if (IS_ERR_OR_NULL(psy->tcd))
return;
thermal_cooling_device_unregister(psy->tcd);
}
#else
static int psy_register_thermal(struct power_supply *psy)
{
return 0;
}
static void psy_unregister_thermal(struct power_supply *psy)
{
}
static int psy_register_cooler(struct power_supply *psy)
{
return 0;
}
static void psy_unregister_cooler(struct power_supply *psy)
{
}
#endif
int power_supply_register(struct device *parent, struct power_supply *psy)
{
struct device *dev;
int rc;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
device_initialize(dev);
dev->class = power_supply_class;
dev->type = &power_supply_dev_type;
dev->parent = parent;
dev->release = power_supply_dev_release;
dev_set_drvdata(dev, psy);
psy->dev = dev;
rc = dev_set_name(dev, "%s", psy->name);
if (rc)
goto dev_set_name_failed;
INIT_WORK(&psy->changed_work, power_supply_changed_work);
rc = power_supply_check_supplies(psy);
if (rc) {
dev_info(dev, "Not all required supplies found, defer probe\n");
goto check_supplies_failed;
}
spin_lock_init(&psy->changed_lock);
rc = device_init_wakeup(dev, true);
if (rc)
goto wakeup_init_failed;
rc = device_add(dev);
if (rc)
goto device_add_failed;
rc = psy_register_thermal(psy);
if (rc)
goto register_thermal_failed;
rc = psy_register_cooler(psy);
if (rc)
goto register_cooler_failed;
rc = power_supply_create_triggers(psy);
if (rc)
goto create_triggers_failed;
power_supply_changed(psy);
goto success;
create_triggers_failed:
psy_unregister_cooler(psy);
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
device_del(dev);
device_add_failed:
wakeup_init_failed:
check_supplies_failed:
dev_set_name_failed:
put_device(dev);
success:
return rc;
}
EXPORT_SYMBOL_GPL(power_supply_register);
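/*
 * Illustrative usage sketch (not part of this file): with this version of the
 * API a driver fills in its own struct power_supply and registers it from its
 * probe routine, unregistering it again in remove. The identifiers below
 * (my_props, my_get_property, my_psy, parent_dev) are hypothetical placeholders.
 *
 *	static struct power_supply my_psy = {
 *		.name		= "my-battery",
 *		.type		= POWER_SUPPLY_TYPE_BATTERY,
 *		.properties	= my_props,
 *		.num_properties	= ARRAY_SIZE(my_props),
 *		.get_property	= my_get_property,
 *	};
 *
 *	ret = power_supply_register(parent_dev, &my_psy);
 *	...
 *	power_supply_unregister(&my_psy);
 */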
void power_supply_unregister(struct power_supply *psy)
{
cancel_work_sync(&psy->changed_work);
sysfs_remove_link(&psy->dev->kobj, "powers");
power_supply_remove_triggers(psy);
psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
device_init_wakeup(psy->dev, false);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
static int __init power_supply_class_init(void)
{
power_supply_class = class_create(THIS_MODULE, "power_supply");
if (IS_ERR(power_supply_class))
return PTR_ERR(power_supply_class);
power_supply_class->dev_uevent = power_supply_uevent;
power_supply_init_attrs(&power_supply_dev_type);
return 0;
}
static void __exit power_supply_class_exit(void)
{
class_destroy(power_supply_class);
}
subsys_initcall(power_supply_class_init);
module_exit(power_supply_class_exit);
MODULE_DESCRIPTION("Universal power supply monitor class");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>, "
"Szabolcs Gyurko, "
"Anton Vorontsov <cbou@mail.ru>");
MODULE_LICENSE("GPL");
| titusece/linux_imx | drivers/power/power_supply_core.c | C | gpl-2.0 | 19,435 |
/*
* This file implements the perfmon-2 subsystem which is used
* to program the IA-64 Performance Monitoring Unit (PMU).
*
* The initial version of perfmon.c was written by
* Ganesh Venkitachalam, IBM Corp.
*
* Then it was modified for perfmon-1.x by Stephane Eranian and
* David Mosberger, Hewlett Packard Co.
*
* Version Perfmon-2.x is a rewrite of perfmon-1.x
* by Stephane Eranian, Hewlett Packard Co.
*
* Copyright (C) 1999-2005 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* More information about perfmon available at:
* http://www.hpl.hp.com/research/linux/perfmon
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#ifdef CONFIG_PERFMON
/*
* perfmon context state
*/
#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
#define PFM_INVALID_ACTIVATION (~0UL)
#define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */
#define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */
/*
* depth of message queue
*/
#define PFM_MAX_MSGS 32
#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
/*
* type of a PMU register (bitmask).
* bitmask structure:
* bit0 : register implemented
* bit1 : end marker
* bit2-3 : reserved
* bit4 : pmc has pmc.pm
* bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
* bit6-7 : register type
* bit8-31: reserved
*/
#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
#define PFM_REG_IMPL 0x1 /* register implemented */
#define PFM_REG_END 0x2 /* end marker */
#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
/* i assumed unsigned */
#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h) (h)->ctx_task
#define PMU_PMC_OI 5 /* position of pmc.oi bit */
/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
#define PFM_CODE_RR 0 /* requesting code range restriction */
#define PFM_DATA_RR 1 /* requesting data range restriction */
#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
#define RDEP(x) (1UL<<(x))
/*
* context protection macros
* in SMP:
* - we need to protect against CPU concurrency (spin_lock)
* - we need to protect against PMU overflow interrupts (local_irq_disable)
* in UP:
* - we need to protect against PMU overflow interrupts (local_irq_disable)
*
* spin_lock_irqsave()/spin_unlock_irqrestore():
* in SMP: local_irq_disable + spin_lock
* in UP : local_irq_disable
*
* spin_lock()/spin_lock():
* in UP : removed automatically
* in SMP: protect against context accesses from other CPU. interrupts
* are not masked. This is useful for the PMU interrupt handler
* because we know we will not get PMU concurrency in that code.
*/
#define PROTECT_CTX(c, f) \
do { \
DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
spin_lock_irqsave(&(c)->ctx_lock, f); \
DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
} while(0)
#define UNPROTECT_CTX(c, f) \
do { \
DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
spin_unlock_irqrestore(&(c)->ctx_lock, f); \
} while(0)
#define PROTECT_CTX_NOPRINT(c, f) \
do { \
spin_lock_irqsave(&(c)->ctx_lock, f); \
} while(0)
#define UNPROTECT_CTX_NOPRINT(c, f) \
do { \
spin_unlock_irqrestore(&(c)->ctx_lock, f); \
} while(0)
#define PROTECT_CTX_NOIRQ(c) \
do { \
spin_lock(&(c)->ctx_lock); \
} while(0)
#define UNPROTECT_CTX_NOIRQ(c) \
do { \
spin_unlock(&(c)->ctx_lock); \
} while(0)
#ifdef CONFIG_SMP
#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t) do {} while(0)
#define GET_ACTIVATION(t) do {} while(0)
#define INC_ACTIVATION(t) do {} while(0)
#endif /* CONFIG_SMP */
#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
/*
* cmp0 must be the value of pmc0
*/
#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
#define PFMFS_MAGIC 0xa0b4d889
/*
* debugging
*/
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
do { \
if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
} while (0)
#define DPRINT_ovfl(a) \
do { \
if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
} while (0)
#endif
/*
* 64-bit software counter structure
*
* the next_reset_type is applied to the next call to pfm_reset_regs()
*/
typedef struct {
unsigned long val; /* virtual 64bit counter value */
unsigned long lval; /* last reset value */
unsigned long long_reset; /* reset value on sampling overflow */
unsigned long short_reset; /* reset value on overflow */
unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */
unsigned long seed; /* seed for random-number generator */
unsigned long mask; /* mask for random-number generator */
unsigned int flags; /* notify/do not notify */
unsigned long eventid; /* overflow event identifier */
} pfm_counter_t;
/*
* context flags
*/
typedef struct {
unsigned int block:1; /* when 1, task will blocked on user notifications */
unsigned int system:1; /* do system wide monitoring */
unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
unsigned int is_sampling:1; /* true if using a custom format */
unsigned int excl_idle:1; /* exclude idle task in system wide session */
unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
unsigned int no_msg:1; /* no message sent on overflow */
unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
unsigned int reserved:22;
} pfm_context_flags_t;
#define PFM_TRAP_REASON_NONE 0x0 /* default value */
#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
/*
* perfmon context: encapsulates all the state of a monitoring session
*/
typedef struct pfm_context {
spinlock_t ctx_lock; /* context protection */
pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
unsigned int ctx_state; /* state: active/inactive (no bitfield) */
struct task_struct *ctx_task; /* task to which context is attached */
unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
struct completion ctx_restart_done; /* use for blocking notification mode */
unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */
unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
unsigned long ctx_saved_psr_up; /* only contains psr.up value */
unsigned long ctx_last_activation; /* context last activation number for last_cpu */
unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
int ctx_fd; /* file descriptor used my this context */
pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
unsigned long ctx_smpl_size; /* size of sampling buffer */
void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
wait_queue_head_t ctx_msgq_wait;
pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
int ctx_msgq_head;
int ctx_msgq_tail;
struct fasync_struct *ctx_async_queue;
wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
} pfm_context_t;
/*
* magic number used to verify that structure is really
* a perfmon context
*/
#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v) do {} while(0)
#define GET_LAST_CPU(ctx) do {} while(0)
#endif
#define ctx_fl_block ctx_flags.block
#define ctx_fl_system ctx_flags.system
#define ctx_fl_using_dbreg ctx_flags.using_dbreg
#define ctx_fl_is_sampling ctx_flags.is_sampling
#define ctx_fl_excl_idle ctx_flags.excl_idle
#define ctx_fl_going_zombie ctx_flags.going_zombie
#define ctx_fl_trap_reason ctx_flags.trap_reason
#define ctx_fl_no_msg ctx_flags.no_msg
#define ctx_fl_can_restart ctx_flags.can_restart
#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
/*
* global information about all sessions
* mostly used to synchronize between system wide and per-process
*/
typedef struct {
spinlock_t pfs_lock; /* lock the structure */
unsigned int pfs_task_sessions; /* number of per task sessions */
unsigned int pfs_sys_sessions; /* number of per system wide sessions */
unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;
/*
* information about a PMC or PMD.
* dep_pmd[]: a bitmask of dependent PMD registers
* dep_pmc[]: a bitmask of dependent PMC registers
*/
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
unsigned int type;
int pm_pos;
unsigned long default_value; /* power-on default value */
unsigned long reserved_mask; /* bitmask of reserved bits */
pfm_reg_check_t read_check;
pfm_reg_check_t write_check;
unsigned long dep_pmd[4];
unsigned long dep_pmc[4];
} pfm_reg_desc_t;
/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
/*
* This structure is initialized at boot time and contains
* a description of the PMU main characteristics.
*
* If the probe function is defined, detection is based
* on its return value:
* - 0 means recognized PMU
* - anything else means not supported
* When the probe function is not defined, then the pmu_family field
* is used and it must match the host CPU family such that:
* - cpu->family & config->pmu_family != 0
*/
typedef struct {
unsigned long ovfl_val; /* overflow value for counters */
pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
unsigned int num_pmcs; /* number of PMCS: computed at init time */
unsigned int num_pmds; /* number of PMDS: computed at init time */
unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
char *pmu_name; /* PMU family name */
unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
unsigned int flags; /* pmu specific flags */
unsigned int num_ibrs; /* number of IBRS: computed at init time */
unsigned int num_dbrs; /* number of DBRS: computed at init time */
unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
int (*probe)(void); /* customized probe routine */
unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;
/*
* PMU specific flags
*/
#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
/*
* debug register related type definitions
*/
typedef struct {
unsigned long ibr_mask:56;
unsigned long ibr_plm:4;
unsigned long ibr_ig:3;
unsigned long ibr_x:1;
} ibr_mask_reg_t;
typedef struct {
unsigned long dbr_mask:56;
unsigned long dbr_plm:4;
unsigned long dbr_ig:2;
unsigned long dbr_w:1;
unsigned long dbr_r:1;
} dbr_mask_reg_t;
typedef union {
unsigned long val;
ibr_mask_reg_t ibr;
dbr_mask_reg_t dbr;
} dbreg_t;
/*
* perfmon command descriptions
*/
typedef struct {
int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
char *cmd_name;
int cmd_flags;
unsigned int cmd_narg;
size_t cmd_argsize;
int (*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;
#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
typedef struct {
unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
unsigned long pfm_smpl_handler_calls;
unsigned long pfm_smpl_handler_cycles;
char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
/*
* perfmon internal variables
*/
static pfm_stats_t pfm_stats[NR_CPUS];
static pfm_session_t pfm_sessions; /* global sessions information */
static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
static struct proc_dir_entry *perfmon_dir;
static pfm_uuid_t pfm_null_uuid = {0,};
static spinlock_t pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);
static pmu_config_t *pmu_conf;
/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[]={
{
.procname = "debug",
.data = &pfm_sysctl.debug,
.maxlen = sizeof(int),
.mode = 0666,
.proc_handler = proc_dointvec,
},
{
.procname = "debug_ovfl",
.data = &pfm_sysctl.debug_ovfl,
.maxlen = sizeof(int),
.mode = 0666,
.proc_handler = proc_dointvec,
},
{
.procname = "fastctxsw",
.data = &pfm_sysctl.fastctxsw,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = proc_dointvec,
},
{
.procname = "expert_mode",
.data = &pfm_sysctl.expert_mode,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = proc_dointvec,
},
{}
};
static ctl_table pfm_sysctl_dir[] = {
{
.procname = "perfmon",
.mode = 0555,
.child = pfm_ctl_table,
},
{}
};
static ctl_table pfm_sysctl_root[] = {
{
.procname = "kernel",
.mode = 0555,
.child = pfm_sysctl_dir,
},
{}
};
static struct ctl_table_header *pfm_sysctl_header;
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
static inline void
pfm_put_task(struct task_struct *task)
{
if (task != current) put_task_struct(task);
}
static inline void
pfm_reserve_page(unsigned long a)
{
SetPageReserved(vmalloc_to_page((void *)a));
}
static inline void
pfm_unreserve_page(unsigned long a)
{
ClearPageReserved(vmalloc_to_page((void*)a));
}
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
spin_lock(&(x)->ctx_lock);
return 0UL;
}
static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
spin_unlock(&(x)->ctx_lock);
}
static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
return get_unmapped_area(file, addr, len, pgoff, flags);
}
/* forward declaration */
static const struct dentry_operations pfmfs_dentry_operations;
static struct dentry *
pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
PFMFS_MAGIC);
}
static struct file_system_type pfm_fs_type = {
.name = "pfmfs",
.mount = pfmfs_mount,
.kill_sb = kill_anon_super,
};
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
/* forward declaration */
static const struct file_operations pfm_file_ops;
/*
* forward declarations
*/
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif
void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"
static pmu_config_t *pmu_confs[]={
&pmu_conf_mont,
&pmu_conf_mck,
&pmu_conf_ita,
&pmu_conf_gen, /* must be last */
NULL
};
static int pfm_end_notify_user(pfm_context_t *ctx);
static inline void
pfm_clear_psr_pp(void)
{
ia64_rsm(IA64_PSR_PP);
ia64_srlz_i();
}
static inline void
pfm_set_psr_pp(void)
{
ia64_ssm(IA64_PSR_PP);
ia64_srlz_i();
}
static inline void
pfm_clear_psr_up(void)
{
ia64_rsm(IA64_PSR_UP);
ia64_srlz_i();
}
static inline void
pfm_set_psr_up(void)
{
ia64_ssm(IA64_PSR_UP);
ia64_srlz_i();
}
static inline unsigned long
pfm_get_psr(void)
{
unsigned long tmp;
tmp = ia64_getreg(_IA64_REG_PSR);
ia64_srlz_i();
return tmp;
}
static inline void
pfm_set_psr_l(unsigned long val)
{
ia64_setreg(_IA64_REG_PSR_L, val);
ia64_srlz_i();
}
static inline void
pfm_freeze_pmu(void)
{
ia64_set_pmc(0,1UL);
ia64_srlz_d();
}
static inline void
pfm_unfreeze_pmu(void)
{
ia64_set_pmc(0,0UL);
ia64_srlz_d();
}
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
int i;
for (i=0; i < nibrs; i++) {
ia64_set_ibr(i, ibrs[i]);
ia64_dv_serialize_instruction();
}
ia64_srlz_i();
}
static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
int i;
for (i=0; i < ndbrs; i++) {
ia64_set_dbr(i, dbrs[i]);
ia64_dv_serialize_data();
}
ia64_srlz_d();
}
/*
* PMD[i] must be a counter. no check is made
*/
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}
/*
* PMD[i] must be a counter. no check is made
*/
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
unsigned long ovfl_val = pmu_conf->ovfl_val;
ctx->ctx_pmds[i].val = val & ~ovfl_val;
/*
	 * writing to the unimplemented part is ignored, so we do not need to
* mask off top part
*/
ia64_set_pmd(i, val & ovfl_val);
}
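/*
 * The notification queue ctx_msgq[] is a fixed-size ring of PFM_MAX_MSGS slots:
 * pfm_get_new_msg() reserves the next tail slot (returning NULL when advancing
 * the tail would collide with the head, i.e. the queue is full) and
 * pfm_get_next_msg() pops the oldest entry from the head.
 */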
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
int idx, next;
next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
if (next == ctx->ctx_msgq_head) return NULL;
idx = ctx->ctx_msgq_tail;
ctx->ctx_msgq_tail = next;
DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
return ctx->ctx_msgq+idx;
}
static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
pfm_msg_t *msg;
DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
if (PFM_CTXQ_EMPTY(ctx)) return NULL;
/*
* get oldest message
*/
msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
/*
* and move forward
*/
ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
return msg;
}
static void
pfm_reset_msgq(pfm_context_t *ctx)
{
ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
DPRINT(("ctx=%p msgq reset\n", ctx));
}
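/*
* allocate the sampling buffer backing store: page-aligned, zeroed via
* vzalloc() and with every page marked reserved so it can later be
* remapped into user space.
*/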
static void *
pfm_rvmalloc(unsigned long size)
{
void *mem;
unsigned long addr;
size = PAGE_ALIGN(size);
mem = vzalloc(size);
if (mem) {
//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
addr = (unsigned long)mem;
while (size > 0) {
pfm_reserve_page(addr);
addr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
}
return mem;
}
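/*
* undo pfm_rvmalloc(): unreserve every page and vfree() the buffer
*/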
static void
pfm_rvfree(void *mem, unsigned long size)
{
unsigned long addr;
if (mem) {
DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
addr = (unsigned long) mem;
while ((long) size > 0) {
pfm_unreserve_page(addr);
addr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
vfree(mem);
}
return;
}
static pfm_context_t *
pfm_context_alloc(int ctx_flags)
{
pfm_context_t *ctx;
/*
* allocate context descriptor
* must be able to free with interrupts disabled
*/
ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
if (ctx) {
DPRINT(("alloc ctx @%p\n", ctx));
/*
* init context protection lock
*/
spin_lock_init(&ctx->ctx_lock);
/*
* context is unloaded
*/
ctx->ctx_state = PFM_CTX_UNLOADED;
/*
* initialization of context's flags
*/
ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
/*
* will move to set properties
* ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
*/
/*
* init restart semaphore to locked
*/
init_completion(&ctx->ctx_restart_done);
/*
* activation is used in SMP only
*/
ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
SET_LAST_CPU(ctx, -1);
/*
* initialize notification message queue
*/
ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
init_waitqueue_head(&ctx->ctx_msgq_wait);
init_waitqueue_head(&ctx->ctx_zombieq);
}
return ctx;
}
static void
pfm_context_free(pfm_context_t *ctx)
{
if (ctx) {
DPRINT(("free ctx @%p\n", ctx));
kfree(ctx);
}
}
static void
pfm_mask_monitoring(struct task_struct *task)
{
pfm_context_t *ctx = PFM_GET_CTX(task);
unsigned long mask, val, ovfl_mask;
int i;
DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
ovfl_mask = pmu_conf->ovfl_val;
/*
* monitoring can only be masked as a result of a valid
* counter overflow. In UP, it means that the PMU still
* has an owner. Note that the owner can be different
* from the current task. However the PMU state belongs
* to the owner.
* In SMP, a valid overflow only happens when task is
* current. Therefore if we come here, we know that
* the PMU state belongs to the current task, therefore
* we can access the live registers.
*
* So in both cases, the live register contains the owner's
* state. We can ONLY touch the PMU registers and NOT the PSR.
*
* As a consequence of this call, the ctx->th_pmds[] array
* contains stale information which must be ignored
* when context is reloaded AND monitoring is active (see
* pfm_restart).
*/
mask = ctx->ctx_used_pmds[0];
for (i = 0; mask; i++, mask>>=1) {
/* skip non used pmds */
if ((mask & 0x1) == 0) continue;
val = ia64_get_pmd(i);
if (PMD_IS_COUNTING(i)) {
/*
* we rebuild the full 64 bit value of the counter
*/
ctx->ctx_pmds[i].val += (val & ovfl_mask);
} else {
ctx->ctx_pmds[i].val = val;
}
DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
i,
ctx->ctx_pmds[i].val,
val & ovfl_mask));
}
/*
* mask monitoring by setting the privilege level to 0
* we cannot use psr.pp/psr.up for this, it is controlled by
* the user
*
* if task is current, modify actual registers, otherwise modify
* thread save state, i.e., what will be restored in pfm_load_regs()
*/
mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
if ((mask & 0x1) == 0UL) continue;
ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
ctx->th_pmcs[i] &= ~0xfUL;
DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
}
/*
* make all of this visible
*/
ia64_srlz_d();
}
/*
* must always be done with task == current
*
* context must be in MASKED state when calling
*/
static void
pfm_restore_monitoring(struct task_struct *task)
{
pfm_context_t *ctx = PFM_GET_CTX(task);
unsigned long mask, ovfl_mask;
unsigned long psr, val;
int i, is_system;
is_system = ctx->ctx_fl_system;
ovfl_mask = pmu_conf->ovfl_val;
if (task != current) {
printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
return;
}
if (ctx->ctx_state != PFM_CTX_MASKED) {
printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
return;
}
psr = pfm_get_psr();
/*
* monitoring is masked via the PMC.
* As we restore their value, we do not want each counter to
* restart right away. We stop monitoring using the PSR,
* restore the PMC (and PMD) and then re-establish the psr
* as it was. Note that there can be no pending overflow at
* this point, because monitoring was MASKED.
*
* system-wide sessions are pinned and self-monitoring
*/
if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
/* disable dcr pp */
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
pfm_clear_psr_pp();
} else {
pfm_clear_psr_up();
}
/*
* first, we restore the PMD
*/
mask = ctx->ctx_used_pmds[0];
for (i = 0; mask; i++, mask>>=1) {
/* skip non used pmds */
if ((mask & 0x1) == 0) continue;
if (PMD_IS_COUNTING(i)) {
/*
* we split the 64bit value according to
* counter width
*/
val = ctx->ctx_pmds[i].val & ovfl_mask;
ctx->ctx_pmds[i].val &= ~ovfl_mask;
} else {
val = ctx->ctx_pmds[i].val;
}
ia64_set_pmd(i, val);
DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
i,
ctx->ctx_pmds[i].val,
val));
}
/*
* restore the PMCs
*/
mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
if ((mask & 0x1) == 0UL) continue;
ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
ia64_set_pmc(i, ctx->th_pmcs[i]);
DPRINT(("[%d] pmc[%d]=0x%lx\n",
task_pid_nr(task), i, ctx->th_pmcs[i]));
}
ia64_srlz_d();
/*
* must restore DBR/IBR because could be modified while masked
* XXX: need to optimize
*/
if (ctx->ctx_fl_using_dbreg) {
pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
}
/*
* now restore PSR
*/
if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
/* enable dcr pp */
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
ia64_srlz_i();
}
pfm_set_psr_l(psr);
}
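/*
* save the live PMD registers selected by mask into the pmds[] array
*/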
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
int i;
ia64_srlz_d();
for (i=0; mask; i++, mask>>=1) {
if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
}
}
/*
* reload from thread state (used for ctxsw only)
*/
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
int i;
unsigned long val, ovfl_val = pmu_conf->ovfl_val;
for (i=0; mask; i++, mask>>=1) {
if ((mask & 0x1) == 0) continue;
val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
ia64_set_pmd(i, val);
}
ia64_srlz_d();
}
/*
* propagate PMD from context to thread-state
*/
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
unsigned long ovfl_val = pmu_conf->ovfl_val;
unsigned long mask = ctx->ctx_all_pmds[0];
unsigned long val;
int i;
DPRINT(("mask=0x%lx\n", mask));
for (i=0; mask; i++, mask>>=1) {
val = ctx->ctx_pmds[i].val;
/*
* We break up the 64 bit value into 2 pieces
* the lower bits go to the machine state in the
* thread (will be reloaded on ctxsw in).
* The upper part stays in the soft-counter.
*/
if (PMD_IS_COUNTING(i)) {
ctx->ctx_pmds[i].val = val & ~ovfl_val;
val &= ovfl_val;
}
ctx->th_pmds[i] = val;
DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
i,
ctx->th_pmds[i],
ctx->ctx_pmds[i].val));
}
}
/*
* propagate PMC from context to thread-state
*/
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
unsigned long mask = ctx->ctx_all_pmcs[0];
int i;
DPRINT(("mask=0x%lx\n", mask));
for (i=0; mask; i++, mask>>=1) {
/* masking 0 with ovfl_val yields 0 */
ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
}
}
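/*
* write the PMC values selected by mask back to the PMU
*/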
static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
int i;
for (i=0; mask; i++, mask>>=1) {
if ((mask & 0x1) == 0) continue;
ia64_set_pmc(i, pmcs[i]);
}
ia64_srlz_d();
}
static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
return memcmp(a, b, sizeof(pfm_uuid_t));
}
static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
int ret = 0;
if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
return ret;
}
static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
int ret = 0;
if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
return ret;
}
static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
int cpu, void *arg)
{
int ret = 0;
if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
return ret;
}
static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
int cpu, void *arg)
{
int ret = 0;
if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
return ret;
}
static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
int ret = 0;
if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
return ret;
}
static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
int ret = 0;
if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
return ret;
}
static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
struct list_head * pos;
pfm_buffer_fmt_t * entry;
list_for_each(pos, &pfm_buffer_fmt_list) {
entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
return entry;
}
return NULL;
}
/*
* find a buffer format based on its uuid
*/
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
pfm_buffer_fmt_t * fmt;
spin_lock(&pfm_buffer_fmt_lock);
fmt = __pfm_find_buffer_fmt(uuid);
spin_unlock(&pfm_buffer_fmt_lock);
return fmt;
}
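/*
* register a sampling buffer format. The format is identified by its UUID
* and must provide at least a handler; duplicates are rejected.
*/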
int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
int ret = 0;
/* some sanity checks */
if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
/* we need at least a handler */
if (fmt->fmt_handler == NULL) return -EINVAL;
/*
* XXX: need check validity of fmt_arg_size
*/
spin_lock(&pfm_buffer_fmt_lock);
if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
ret = -EBUSY;
goto out;
}
list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
out:
spin_unlock(&pfm_buffer_fmt_lock);
return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);
int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
pfm_buffer_fmt_t *fmt;
int ret = 0;
spin_lock(&pfm_buffer_fmt_lock);
fmt = __pfm_find_buffer_fmt(uuid);
if (!fmt) {
printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
ret = -EINVAL;
goto out;
}
list_del_init(&fmt->fmt_list);
printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
out:
spin_unlock(&pfm_buffer_fmt_lock);
return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
extern void update_pal_halt_status(int);
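/*
* reserve a monitoring session. System-wide sessions are exclusive per CPU
* and cannot coexist with per-task sessions (and vice versa).
*/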
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
unsigned long flags;
/*
* validity checks on cpu_mask have been done upstream
*/
LOCK_PFS(flags);
DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
pfm_sessions.pfs_sys_sessions,
pfm_sessions.pfs_task_sessions,
pfm_sessions.pfs_sys_use_dbregs,
is_syswide,
cpu));
if (is_syswide) {
/*
* cannot mix system wide and per-task sessions
*/
if (pfm_sessions.pfs_task_sessions > 0UL) {
DPRINT(("system wide not possible, %u conflicting task_sessions\n",
pfm_sessions.pfs_task_sessions));
goto abort;
}
if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
pfm_sessions.pfs_sys_session[cpu] = task;
pfm_sessions.pfs_sys_sessions++ ;
} else {
if (pfm_sessions.pfs_sys_sessions) goto abort;
pfm_sessions.pfs_task_sessions++;
}
DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
pfm_sessions.pfs_sys_sessions,
pfm_sessions.pfs_task_sessions,
pfm_sessions.pfs_sys_use_dbregs,
is_syswide,
cpu));
/*
* disable default_idle() to go to PAL_HALT
*/
update_pal_halt_status(0);
UNLOCK_PFS(flags);
return 0;
error_conflict:
DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
cpu));
abort:
UNLOCK_PFS(flags);
return -EBUSY;
}
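/*
* release a previously reserved session and, when no session remains,
* allow default_idle() to use PAL_HALT again.
*/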
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
unsigned long flags;
/*
* validity checks on cpu_mask have been done upstream
*/
LOCK_PFS(flags);
DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
pfm_sessions.pfs_sys_sessions,
pfm_sessions.pfs_task_sessions,
pfm_sessions.pfs_sys_use_dbregs,
is_syswide,
cpu));
if (is_syswide) {
pfm_sessions.pfs_sys_session[cpu] = NULL;
/*
* would not work with perfmon+more than one bit in cpu_mask
*/
if (ctx && ctx->ctx_fl_using_dbreg) {
if (pfm_sessions.pfs_sys_use_dbregs == 0) {
printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
} else {
pfm_sessions.pfs_sys_use_dbregs--;
}
}
pfm_sessions.pfs_sys_sessions--;
} else {
pfm_sessions.pfs_task_sessions--;
}
DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
pfm_sessions.pfs_sys_sessions,
pfm_sessions.pfs_task_sessions,
pfm_sessions.pfs_sys_use_dbregs,
is_syswide,
cpu));
/*
* if possible, enable default_idle() to go into PAL_HALT
*/
if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
update_pal_halt_status(1);
UNLOCK_PFS(flags);
return 0;
}
/*
* removes virtual mapping of the sampling buffer.
* IMPORTANT: cannot be called with interrupts disable, e.g. inside
* a PROTECT_CTX() section.
*/
static int
pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
{
struct task_struct *task = current;
int r;
/* sanity checks */
if (task->mm == NULL || size == 0UL || vaddr == NULL) {
printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
return -EINVAL;
}
DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
/*
* does the actual unmapping
*/
r = vm_munmap((unsigned long)vaddr, size);
if (r !=0) {
printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
}
DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
return 0;
}
/*
* free actual physical storage used by sampling buffer
*/
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
pfm_buffer_fmt_t *fmt;
if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
/*
* we won't use the buffer format anymore
*/
fmt = ctx->ctx_buf_fmt;
DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
ctx->ctx_smpl_hdr,
ctx->ctx_smpl_size,
ctx->ctx_smpl_vaddr));
pfm_buf_fmt_exit(fmt, current, NULL, NULL);
/*
* free the buffer
*/
pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
ctx->ctx_smpl_hdr = NULL;
ctx->ctx_smpl_size = 0UL;
return 0;
invalid_free:
printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
return -EINVAL;
}
#endif
static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
if (fmt == NULL) return;
pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}
/*
* pfmfs should _never_ be mounted by userland - too much of a security hassle,
* no real gain from having the whole whorehouse mounted. So we don't need
* any operations on the root directory. However, we need a non-trivial
* d_name - pfm: will go nicely and kill the special-casing in procfs.
*/
static struct vfsmount *pfmfs_mnt __read_mostly;
static int __init
init_pfm_fs(void)
{
int err = register_filesystem(&pfm_fs_type);
if (!err) {
pfmfs_mnt = kern_mount(&pfm_fs_type);
err = PTR_ERR(pfmfs_mnt);
if (IS_ERR(pfmfs_mnt))
unregister_filesystem(&pfm_fs_type);
else
err = 0;
}
return err;
}
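/*
* read() on a perfmon file descriptor: returns exactly one pfm_msg_t
* notification message, blocking unless O_NONBLOCK is set.
*/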
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
pfm_context_t *ctx;
pfm_msg_t *msg;
ssize_t ret;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
if (PFM_IS_FILE(filp) == 0) {
printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
return -EINVAL;
}
ctx = filp->private_data;
if (ctx == NULL) {
printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
return -EINVAL;
}
/*
* check even when there is no message
*/
if (size < sizeof(pfm_msg_t)) {
DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
return -EINVAL;
}
PROTECT_CTX(ctx, flags);
/*
* put ourselves on the wait queue
*/
add_wait_queue(&ctx->ctx_msgq_wait, &wait);
for(;;) {
/*
* check wait queue
*/
set_current_state(TASK_INTERRUPTIBLE);
DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
ret = 0;
if(PFM_CTXQ_EMPTY(ctx) == 0) break;
UNPROTECT_CTX(ctx, flags);
/*
* check non-blocking read
*/
ret = -EAGAIN;
if(filp->f_flags & O_NONBLOCK) break;
/*
* check pending signals
*/
if(signal_pending(current)) {
ret = -EINTR;
break;
}
/*
* no message, so wait
*/
schedule();
PROTECT_CTX(ctx, flags);
}
DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
set_current_state(TASK_RUNNING);
remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
if (ret < 0) goto abort;
ret = -EINVAL;
msg = pfm_get_next_msg(ctx);
if (msg == NULL) {
printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
goto abort_locked;
}
DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
ret = -EFAULT;
if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
abort_locked:
UNPROTECT_CTX(ctx, flags);
abort:
return ret;
}
static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
size_t size, loff_t *ppos)
{
DPRINT(("pfm_write called\n"));
return -EINVAL;
}
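/*
* poll()/select() support: report POLLIN when at least one notification
* message is queued on the context.
*/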
static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
pfm_context_t *ctx;
unsigned long flags;
unsigned int mask = 0;
if (PFM_IS_FILE(filp) == 0) {
printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
return 0;
}
ctx = filp->private_data;
if (ctx == NULL) {
printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
return 0;
}
DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
poll_wait(filp, &ctx->ctx_msgq_wait, wait);
PROTECT_CTX(ctx, flags);
if (PFM_CTXQ_EMPTY(ctx) == 0)
mask = POLLIN | POLLRDNORM;
UNPROTECT_CTX(ctx, flags);
DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
return mask;
}
static long
pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
DPRINT(("pfm_ioctl called\n"));
return -EINVAL;
}
/*
* interrupt cannot be masked when coming here
*/
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
int ret;
ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
task_pid_nr(current),
fd,
on,
ctx->ctx_async_queue, ret));
return ret;
}
static int
pfm_fasync(int fd, struct file *filp, int on)
{
pfm_context_t *ctx;
int ret;
if (PFM_IS_FILE(filp) == 0) {
printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
return -EBADF;
}
ctx = filp->private_data;
if (ctx == NULL) {
printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
return -EBADF;
}
/*
* we cannot mask interrupts during this call because it
* may go to sleep if memory is not readily available.
*
* We are protected from the context disappearing by the get_fd()/put_fd()
* done in the caller. Serialization of this function is ensured by the caller.
*/
ret = pfm_do_fasync(fd, filp, ctx, on);
DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
fd,
on,
ctx->ctx_async_queue, ret));
return ret;
}
#ifdef CONFIG_SMP
/*
* this function is exclusively called from pfm_close().
* The context is not protected at that time, nor are interrupts
* on the remote CPU. That's necessary to avoid deadlocks.
*/
static void
pfm_syswide_force_stop(void *info)
{
pfm_context_t *ctx = (pfm_context_t *)info;
struct pt_regs *regs = task_pt_regs(current);
struct task_struct *owner;
unsigned long flags;
int ret;
if (ctx->ctx_cpu != smp_processor_id()) {
printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
ctx->ctx_cpu,
smp_processor_id());
return;
}
owner = GET_PMU_OWNER();
if (owner != ctx->ctx_task) {
printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
smp_processor_id(),
task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
return;
}
if (GET_PMU_CTX() != ctx) {
printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
smp_processor_id(),
GET_PMU_CTX(), ctx);
return;
}
DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
/*
* the context is already protected in pfm_close(), we simply
* need to mask interrupts to avoid a PMU interrupt race on
* this CPU
*/
local_irq_save(flags);
ret = pfm_context_unload(ctx, NULL, 0, regs);
if (ret) {
DPRINT(("context_unload returned %d\n", ret));
}
/*
* unmask interrupts, PMU interrupts are now spurious here
*/
local_irq_restore(flags);
}
static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
int ret;
DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
/*
* called for each close(). Partially free resources.
* When caller is self-monitoring, the context is unloaded.
*/
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
pfm_context_t *ctx;
struct task_struct *task;
struct pt_regs *regs;
unsigned long flags;
unsigned long smpl_buf_size = 0UL;
void *smpl_buf_vaddr = NULL;
int state, is_system;
if (PFM_IS_FILE(filp) == 0) {
DPRINT(("bad magic for\n"));
return -EBADF;
}
ctx = filp->private_data;
if (ctx == NULL) {
printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
return -EBADF;
}
/*
* remove our file from the async queue, if we use this mode.
* This can be done without the context being protected. We come
* here when the context has become unreachable by other tasks.
*
* We may still have active monitoring at this point and we may
* end up in pfm_overflow_handler(). However, fasync_helper()
* operates with interrupts disabled and it cleans up the
* queue. If the PMU handler is called prior to entering
* fasync_helper() then it will send a signal. If it is
* invoked after, it will find an empty queue and no
* signal will be sent. In either case, we are safe
*/
PROTECT_CTX(ctx, flags);
state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
regs = task_pt_regs(task);
DPRINT(("ctx_state=%d is_current=%d\n",
state,
task == current ? 1 : 0));
/*
* if state == UNLOADED, then task is NULL
*/
/*
* we must stop and unload because we are losing access to the context.
*/
if (task == current) {
#ifdef CONFIG_SMP
/*
* the task IS the owner but it migrated to another CPU: that's bad
* but we must handle this cleanly. Unfortunately, the kernel does
* not provide a mechanism to block migration (while the context is loaded).
*
* We need to release the resource on the ORIGINAL cpu.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
/*
* keep context protected but unmask interrupt for IPI
*/
local_irq_restore(flags);
pfm_syswide_cleanup_other_cpu(ctx);
/*
* restore interrupt masking
*/
local_irq_save(flags);
/*
* context is unloaded at this point
*/
} else
#endif /* CONFIG_SMP */
{
DPRINT(("forcing unload\n"));
/*
* stop and unload, returning with state UNLOADED
* and session unreserved.
*/
pfm_context_unload(ctx, NULL, 0, regs);
DPRINT(("ctx_state=%d\n", ctx->ctx_state));
}
}
/*
* remove virtual mapping, if any, for the calling task.
* cannot reset the ctx fields until the last user calls close().
*
* ctx_smpl_vaddr must never be cleared because it is needed
* by every task with access to the context
*
* When called from do_exit(), the mm context is gone already, therefore
* mm is NULL, i.e., the VMA is already gone and we do not have to
* do anything here
*/
if (ctx->ctx_smpl_vaddr && current->mm) {
smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
smpl_buf_size = ctx->ctx_smpl_size;
}
UNPROTECT_CTX(ctx, flags);
/*
* if there was a mapping, then we systematically remove it
* at this point. Cannot be done inside critical section
* because some VM function reenables interrupts.
*
*/
if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
return 0;
}
/*
* called either on explicit close() or from exit_files().
* Only the LAST user of the file gets to this point, i.e., it is
* called only ONCE.
*
* IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
* (fput()), i.e., the last task to access the file. Nobody else can access the
* file at this point.
*
* When called from exit_files(), the VMA has been freed because exit_mm()
* is executed before exit_files().
*
* When called from exit_files(), the current task is not yet ZOMBIE but we
* flush the PMU state to the context.
*/
static int
pfm_close(struct inode *inode, struct file *filp)
{
pfm_context_t *ctx;
struct task_struct *task;
struct pt_regs *regs;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
unsigned long smpl_buf_size = 0UL;
void *smpl_buf_addr = NULL;
int free_possible = 1;
int state, is_system;
DPRINT(("pfm_close called private=%p\n", filp->private_data));
if (PFM_IS_FILE(filp) == 0) {
DPRINT(("bad magic\n"));
return -EBADF;
}
ctx = filp->private_data;
if (ctx == NULL) {
printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
return -EBADF;
}
PROTECT_CTX(ctx, flags);
state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
regs = task_pt_regs(task);
DPRINT(("ctx_state=%d is_current=%d\n",
state,
task == current ? 1 : 0));
/*
* if task == current, then pfm_flush() unloaded the context
*/
if (state == PFM_CTX_UNLOADED) goto doit;
/*
* context is loaded/masked and task != current, we need to
* either force an unload or go zombie
*/
/*
* The task is currently blocked or will block after an overflow.
* we must force it to wakeup to get out of the
* MASKED state and transition to the unloaded state by itself.
*
* This situation is only possible for per-task mode
*/
if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
/*
* set a "partial" zombie state to be checked
* upon return from down() in pfm_handle_work().
*
* We cannot use the ZOMBIE state, because it is checked
* by pfm_load_regs() which is called upon wakeup from down().
* In such case, it would free the context and then we would
* return to pfm_handle_work() which would access the
* stale context. Instead, we set a flag invisible to pfm_load_regs()
* but visible to pfm_handle_work().
*
* For some window of time, we have a zombie context with
* ctx_state = MASKED and not ZOMBIE
*/
ctx->ctx_fl_going_zombie = 1;
/*
* force task to wake up from MASKED state
*/
complete(&ctx->ctx_restart_done);
DPRINT(("waking up ctx_state=%d\n", state));
/*
* put ourself to sleep waiting for the other
* task to report completion
*
* the context is protected by mutex, therefore there
* is no risk of being notified of completion before
* being actually on the waitq.
*/
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ctx->ctx_zombieq, &wait);
UNPROTECT_CTX(ctx, flags);
/*
* XXX: check for signals :
* - ok for explicit close
* - not ok when coming from exit_files()
*/
schedule();
PROTECT_CTX(ctx, flags);
remove_wait_queue(&ctx->ctx_zombieq, &wait);
set_current_state(TASK_RUNNING);
/*
* context is unloaded at this point
*/
DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
}
else if (task != current) {
#ifdef CONFIG_SMP
/*
* switch context to zombie state
*/
ctx->ctx_state = PFM_CTX_ZOMBIE;
DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
/*
* cannot free the context on the spot. deferred until
* the task notices the ZOMBIE state
*/
free_possible = 0;
#else
pfm_context_unload(ctx, NULL, 0, regs);
#endif
}
doit:
/* reload state, may have changed during opening of critical section */
state = ctx->ctx_state;
/*
* the context is still attached to a task (possibly current),
* so we cannot destroy it right now
*/
/*
* we must free the sampling buffer right here because
* we cannot rely on it being cleaned up later by the
* monitored task. It is not possible to free vmalloc'ed
* memory in pfm_load_regs(). Instead, we remove the buffer
* now. Should there be a subsequent PMU overflow originally
* meant for sampling, it will be converted to spurious
* and that's fine because the monitoring tool is gone anyway.
*/
if (ctx->ctx_smpl_hdr) {
smpl_buf_addr = ctx->ctx_smpl_hdr;
smpl_buf_size = ctx->ctx_smpl_size;
/* no more sampling */
ctx->ctx_smpl_hdr = NULL;
ctx->ctx_fl_is_sampling = 0;
}
DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
state,
free_possible,
smpl_buf_addr,
smpl_buf_size));
if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
/*
* if the state is UNLOADED, the session has already been unreserved.
*/
if (state == PFM_CTX_ZOMBIE) {
pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
}
/*
* disconnect file descriptor from context must be done
* before we unlock.
*/
filp->private_data = NULL;
/*
* if we free on the spot, the context is now completely unreachable
* from the caller's side. The monitored task side is also cut, so we
* can safely free it.
*
* If we have a deferred free, only the caller side is disconnected.
*/
UNPROTECT_CTX(ctx, flags);
/*
* All memory free operations (especially for vmalloc'ed memory)
* MUST be done with interrupts ENABLED.
*/
if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
/*
* return the memory used by the context
*/
if (free_possible) pfm_context_free(ctx);
return 0;
}
static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
DPRINT(("pfm_no_open called\n"));
return -ENXIO;
}
static const struct file_operations pfm_file_ops = {
.llseek = no_llseek,
.read = pfm_read,
.write = pfm_write,
.poll = pfm_poll,
.unlocked_ioctl = pfm_ioctl,
.open = pfm_no_open, /* special open code to disallow open via /proc */
.fasync = pfm_fasync,
.release = pfm_close,
.flush = pfm_flush
};
static int
pfmfs_delete_dentry(const struct dentry *dentry)
{
return 1;
}
static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
dentry->d_inode->i_ino);
}
static const struct dentry_operations pfmfs_dentry_operations = {
.d_delete = pfmfs_delete_dentry,
.d_dname = pfmfs_dname,
};
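/*
* allocate the anonymous pfmfs inode/dentry/file triplet used to expose
* a context to user level as a read-only file.
*/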
static struct file *
pfm_alloc_file(pfm_context_t *ctx)
{
struct file *file;
struct inode *inode;
struct path path;
struct qstr this = { .name = "" };
/*
* allocate a new inode
*/
inode = new_inode(pfmfs_mnt->mnt_sb);
if (!inode)
return ERR_PTR(-ENOMEM);
DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
inode->i_mode = S_IFCHR|S_IRUGO;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
/*
* allocate a new dcache entry
*/
path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
if (!path.dentry) {
iput(inode);
return ERR_PTR(-ENOMEM);
}
path.mnt = mntget(pfmfs_mnt);
d_add(path.dentry, inode);
file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
if (!file) {
path_put(&path);
return ERR_PTR(-ENFILE);
}
file->f_flags = O_RDONLY;
file->private_data = ctx;
return file;
}
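/*
* remap the vmalloc'ed sampling buffer, one page at a time, into the
* user address range starting at addr (read-only mapping).
*/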
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
while (size > 0) {
unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
return -ENOMEM;
addr += PAGE_SIZE;
buf += PAGE_SIZE;
size -= PAGE_SIZE;
}
return 0;
}
/*
* allocate a sampling buffer and remaps it into the user address space of the task
*/
static int
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma = NULL;
unsigned long size;
void *smpl_buf;
/*
* the fixed header + requested size and align to page boundary
*/
size = PAGE_ALIGN(rsize);
DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
/*
* check requested size to avoid Denial-of-service attacks
* XXX: may have to refine this test
* Check against address space limit.
*
* if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
* return -ENOMEM;
*/
if (size > task_rlimit(task, RLIMIT_MEMLOCK))
return -ENOMEM;
/*
* We do the easy to undo allocations first.
*
* pfm_rvmalloc() clears the buffer, so there is no leak
*/
smpl_buf = pfm_rvmalloc(size);
if (smpl_buf == NULL) {
DPRINT(("Can't allocate sampling buffer\n"));
return -ENOMEM;
}
DPRINT(("smpl_buf @%p\n", smpl_buf));
/* allocate vma */
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
DPRINT(("Cannot allocate vma\n"));
goto error_kmem;
}
INIT_LIST_HEAD(&vma->anon_vma_chain);
/*
* partially initialize the vma for the sampling buffer
*/
vma->vm_mm = mm;
vma->vm_file = filp;
vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
/*
* Now we have everything we need and we can initialize
* and connect all the data structures
*/
ctx->ctx_smpl_hdr = smpl_buf;
ctx->ctx_smpl_size = size; /* aligned size */
/*
* Let's do the difficult operations next.
*
* now we atomically find some area in the address space and
* remap the buffer in it.
*/
down_write(&task->mm->mmap_sem);
/* find some free area in address space, must have mmap sem held */
vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
if (vma->vm_start == 0UL) {
DPRINT(("Cannot find unmapped area for size %ld\n", size));
up_write(&task->mm->mmap_sem);
goto error;
}
vma->vm_end = vma->vm_start + size;
vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
/* can only be applied to current task, need to have the mm semaphore held when called */
if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
DPRINT(("Can't remap buffer\n"));
up_write(&task->mm->mmap_sem);
goto error;
}
get_file(filp);
/*
* now insert the vma in the vm list for the process, must be
* done with mmap lock held
*/
insert_vm_struct(mm, vma);
mm->total_vm += size >> PAGE_SHIFT;
vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
vma_pages(vma));
up_write(&task->mm->mmap_sem);
/*
* keep track of user level virtual address
*/
ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
*(unsigned long *)user_vaddr = vma->vm_start;
return 0;
error:
kmem_cache_free(vm_area_cachep, vma);
error_kmem:
pfm_rvfree(smpl_buf, size);
return -ENOMEM;
}
/*
* XXX: do something better here
*/
static int
pfm_bad_permissions(struct task_struct *task)
{
const struct cred *tcred;
uid_t uid = current_uid();
gid_t gid = current_gid();
int ret;
rcu_read_lock();
tcred = __task_cred(task);
/* inspired by ptrace_attach() */
DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
uid,
gid,
tcred->euid,
tcred->suid,
tcred->uid,
tcred->egid,
tcred->sgid));
ret = ((uid != tcred->euid)
|| (uid != tcred->suid)
|| (uid != tcred->uid)
|| (gid != tcred->egid)
|| (gid != tcred->sgid)
|| (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);
rcu_read_unlock();
return ret;
}
static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
int ctx_flags;
/* valid signal */
ctx_flags = pfx->ctx_flags;
if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
/*
* cannot block in this mode
*/
if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
return -EINVAL;
}
}
/* probably more to add here */
return 0;
}
static int
pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
unsigned int cpu, pfarg_context_t *arg)
{
pfm_buffer_fmt_t *fmt = NULL;
unsigned long size = 0UL;
void *uaddr = NULL;
void *fmt_arg = NULL;
int ret = 0;
#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
/* invoke and lock buffer format, if found */
fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
if (fmt == NULL) {
DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
return -EINVAL;
}
/*
* buffer argument MUST be contiguous to pfarg_context_t
*/
if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
if (ret) goto error;
/* link buffer format and context */
ctx->ctx_buf_fmt = fmt;
ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
/*
* check if buffer format wants to use perfmon buffer allocation/mapping service
*/
ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
if (ret) goto error;
if (size) {
/*
* buffer is always remapped into the caller's address space
*/
ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
if (ret) goto error;
/* keep track of user address of buffer */
arg->ctx_smpl_vaddr = uaddr;
}
ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
error:
return ret;
}
static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
int i;
/*
* install reset values for PMC.
*/
for (i=1; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_IMPL(i) == 0) continue;
ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
}
/*
* PMD registers are set to 0UL when the context is memset()
*/
/*
* On context switch restore, we must restore ALL pmc and ALL pmd even
* when they are not actively used by the task. In UP, the incoming process
* may otherwise pick up left over PMC, PMD state from the previous process.
* As opposed to PMD, stale PMC can cause harm to the incoming
* process because they may change what is being measured.
* Therefore, we must systematically reinstall the entire
* PMC state. In SMP, the same thing is possible on the
* same CPU but also between 2 CPUs.
*
* The problem with PMD is information leaking especially
* to user level when psr.sp=0
*
* There is unfortunately no easy way to avoid this problem
* on either UP or SMP. This definitively slows down the
* pfm_load_regs() function.
*/
/*
* bitmask of all PMCs accessible to this context
*
* PMC0 is treated differently.
*/
ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
/*
* bitmask of all PMDs that are accessible to this context
*/
ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
/*
* useful in case of re-enable after disable
*/
ctx->ctx_used_ibrs[0] = 0UL;
ctx->ctx_used_dbrs[0] = 0UL;
}
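/*
* compute the size of the format-specific argument that follows the
* pfarg_context_t structure when a sampling buffer format is requested
*/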
static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
pfarg_context_t *req = (pfarg_context_t *)arg;
pfm_buffer_fmt_t *fmt;
*sz = 0;
if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
if (fmt == NULL) {
DPRINT(("cannot find buffer format\n"));
return -EINVAL;
}
/* get just enough to copy in user parameters */
*sz = fmt->fmt_arg_size;
DPRINT(("arg_size=%lu\n", *sz));
return 0;
}
/*
* cannot attach if :
* - kernel task
* - task not owned by caller
* - task incompatible with context mode
*/
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
/*
* no kernel task or task not owned by the caller
*/
if (task->mm == NULL) {
DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
return -EPERM;
}
if (pfm_bad_permissions(task)) {
DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
return -EPERM;
}
/*
* cannot block in self-monitoring mode
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
return -EINVAL;
}
if (task->exit_state == EXIT_ZOMBIE) {
DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
return -EBUSY;
}
/*
* always ok for self
*/
if (task == current) return 0;
if (!task_is_stopped_or_traced(task)) {
DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
return -EBUSY;
}
/*
* make sure the task is off any CPU
*/
wait_task_inactive(task, 0);
/* more to come... */
return 0;
}
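/*
* resolve pid to a task suitable for attaching the context. A reference
* is taken on the task when it is not the caller.
*/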
static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
struct task_struct *p = current;
int ret;
/* XXX: need to add more checks here */
if (pid < 2) return -EPERM;
if (pid != task_pid_vnr(current)) {
read_lock(&tasklist_lock);
p = find_task_by_vpid(pid);
/* make sure task cannot go away while we operate on it */
if (p) get_task_struct(p);
read_unlock(&tasklist_lock);
if (p == NULL) return -ESRCH;
}
ret = pfm_task_incompatible(ctx, p);
if (ret == 0) {
*task = p;
} else if (p != current) {
pfm_put_task(p);
}
return ret;
}
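/*
* context creation: allocate the context, its file descriptor and,
* if a sampling format is requested, the sampling buffer.
*/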
static int
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
pfarg_context_t *req = (pfarg_context_t *)arg;
struct file *filp;
struct path path;
int ctx_flags;
int fd;
int ret;
/* let's check the arguments first */
ret = pfarg_is_sane(current, req);
if (ret < 0)
return ret;
ctx_flags = req->ctx_flags;
ret = -ENOMEM;
fd = get_unused_fd();
if (fd < 0)
return fd;
ctx = pfm_context_alloc(ctx_flags);
if (!ctx)
goto error;
filp = pfm_alloc_file(ctx);
if (IS_ERR(filp)) {
ret = PTR_ERR(filp);
goto error_file;
}
req->ctx_fd = ctx->ctx_fd = fd;
/*
* does the user want to sample?
*/
if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
if (ret)
goto buffer_error;
}
DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
ctx,
ctx_flags,
ctx->ctx_fl_system,
ctx->ctx_fl_block,
ctx->ctx_fl_excl_idle,
ctx->ctx_fl_no_msg,
ctx->ctx_fd));
/*
* initialize soft PMU state
*/
pfm_reset_pmu_state(ctx);
fd_install(fd, filp);
return 0;
buffer_error:
path = filp->f_path;
put_filp(filp);
path_put(&path);
if (ctx->ctx_buf_fmt) {
pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
}
error_file:
pfm_context_free(ctx);
error:
put_unused_fd(fd);
return ret;
}
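/*
* compute the next reset value for a counter (short or long reset),
* optionally randomized using the per-register seed and mask when
* PFM_REGFL_RANDOM is set.
*/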
static inline unsigned long
pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
{
unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
extern unsigned long carta_random32 (unsigned long seed);
if (reg->flags & PFM_REGFL_RANDOM) {
new_seed = carta_random32(old_seed);
val -= (old_seed & mask); /* counter values are negative numbers! */
if ((mask >> 32) != 0)
/* construct a full 64-bit random value: */
new_seed |= carta_random32(old_seed >> 32) << 32;
reg->seed = new_seed;
}
reg->lval = val;
return val;
}
static void
pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
unsigned long mask = ovfl_regs[0];
unsigned long reset_others = 0UL;
unsigned long val;
int i;
/*
* now restore reset value on sampling overflowed counters
*/
mask >>= PMU_FIRST_COUNTER;
for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
if ((mask & 0x1UL) == 0UL) continue;
ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
}
/*
* Now take care of resetting the other registers
*/
for(i = 0; reset_others; i++, reset_others >>= 1) {
if ((reset_others & 0x1) == 0) continue;
ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
is_long_reset ? "long" : "short", i, val));
}
}
static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
unsigned long mask = ovfl_regs[0];
unsigned long reset_others = 0UL;
unsigned long val;
int i;
DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
if (ctx->ctx_state == PFM_CTX_MASKED) {
pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
return;
}
/*
* now restore reset value on sampling overflowed counters
*/
mask >>= PMU_FIRST_COUNTER;
for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
if ((mask & 0x1UL) == 0UL) continue;
val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
pfm_write_soft_counter(ctx, i, val);
}
/*
* Now take care of resetting the other registers
*/
for(i = 0; reset_others; i++, reset_others >>= 1) {
if ((reset_others & 0x1) == 0) continue;
val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
if (PMD_IS_COUNTING(i)) {
pfm_write_soft_counter(ctx, i, val);
} else {
ia64_set_pmd(i, val);
}
DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
is_long_reset ? "long" : "short", i, val));
}
ia64_srlz_d();
}
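/*
* write PMC registers: validate each request, commit it to the software
* context state and, when the caller can access the PMU, to the live
* registers as well.
*/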
static int
pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task;
pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned long value, pmc_pm;
unsigned long smpl_pmds, reset_pmds, impl_pmds;
unsigned int cnum, reg_flags, flags, pmc_type;
int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
int is_monitor, is_counting, state;
int ret = -EINVAL;
pfm_reg_check_t wr_func;
#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
state = ctx->ctx_state;
is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
is_system = ctx->ctx_fl_system;
task = ctx->ctx_task;
impl_pmds = pmu_conf->impl_pmds[0];
if (state == PFM_CTX_ZOMBIE) return -EINVAL;
if (is_loaded) {
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
}
expert_mode = pfm_sysctl.expert_mode;
for (i = 0; i < count; i++, req++) {
cnum = req->reg_num;
reg_flags = req->reg_flags;
value = req->reg_value;
smpl_pmds = req->reg_smpl_pmds[0];
reset_pmds = req->reg_reset_pmds[0];
flags = 0;
if (cnum >= PMU_MAX_PMCS) {
DPRINT(("pmc%u is invalid\n", cnum));
goto error;
}
pmc_type = pmu_conf->pmc_desc[cnum].type;
pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
/*
* we reject all non implemented PMC as well
* as attempts to modify PMC[0-3] which are used
* as status registers by the PMU
*/
if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
goto error;
}
wr_func = pmu_conf->pmc_desc[cnum].write_check;
/*
* If the PMC is a monitor, then if the value is not the default:
* - system-wide session: PMCx.pm=1 (privileged monitor)
* - per-task : PMCx.pm=0 (user monitor)
*/
if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
cnum,
pmc_pm,
is_system));
goto error;
}
if (is_counting) {
/*
* enforce generation of overflow interrupt. Necessary on all
* CPUs.
*/
value |= 1 << PMU_PMC_OI;
if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
flags |= PFM_REGFL_OVFL_NOTIFY;
}
if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
/* verify validity of smpl_pmds */
if ((smpl_pmds & impl_pmds) != smpl_pmds) {
DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
goto error;
}
/* verify validity of reset_pmds */
if ((reset_pmds & impl_pmds) != reset_pmds) {
DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
goto error;
}
} else {
if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
goto error;
}
/* eventid on non-counting monitors are ignored */
}
/*
* execute write checker, if any
*/
if (likely(expert_mode == 0 && wr_func)) {
ret = (*wr_func)(task, ctx, cnum, &value, regs);
if (ret) goto error;
ret = -EINVAL;
}
/*
* no error on this register
*/
PFM_REG_RETFLAG_SET(req->reg_flags, 0);
/*
* Now we commit the changes to the software state
*/
/*
* update overflow information
*/
if (is_counting) {
/*
* full flag update each time a register is programmed
*/
ctx->ctx_pmds[cnum].flags = flags;
ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
/*
* Mark all PMDS to be accessed as used.
*
* We do not keep track of PMC because we have to
* systematically restore ALL of them.
*
* We do not update the used_monitors mask, because
* if we have not programmed them, they will be in
* a quiescent state, therefore we will not need to
* mask/restore them when the context is MASKED.
*/
CTX_USED_PMD(ctx, reset_pmds);
CTX_USED_PMD(ctx, smpl_pmds);
/*
* make sure we do not try to reset on
* restart because we have established new values
*/
if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
}
/*
* Needed in case the user does not initialize the equivalent
* PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
* possible leak here.
*/
CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
/*
* keep track of the monitor PMC that we are using.
* we save the value of the pmc in ctx_pmcs[] and if
* the monitoring is not stopped for the context we also
* place it in the saved state area so that it will be
* picked up later by the context switch code.
*
* The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
*
* The value in th_pmcs[] may be modified on overflow, i.e., when
* monitoring needs to be stopped.
*/
if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
/*
* update context state
*/
ctx->ctx_pmcs[cnum] = value;
if (is_loaded) {
/*
* write thread state
*/
if (is_system == 0) ctx->th_pmcs[cnum] = value;
/*
* write hardware register if we can
*/
if (can_access_pmu) {
ia64_set_pmc(cnum, value);
}
#ifdef CONFIG_SMP
else {
/*
* per-task SMP only here
*
* we are guaranteed that the task is not running on the other CPU,
* we indicate that this PMD will need to be reloaded if the task
* is rescheduled on the CPU it ran last on.
*/
ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
}
#endif
}
DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
cnum,
value,
is_loaded,
can_access_pmu,
flags,
ctx->ctx_all_pmcs[0],
ctx->ctx_used_pmds[0],
ctx->ctx_pmds[cnum].eventid,
smpl_pmds,
reset_pmds,
ctx->ctx_reload_pmcs[0],
ctx->ctx_used_monitors[0],
ctx->ctx_ovfl_regs[0]));
}
/*
* make sure the changes are visible
*/
if (can_access_pmu) ia64_srlz_d();
return 0;
error:
PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
return ret;
}
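/*
* write PMD registers: same logic as pfm_write_pmcs() but counters are
* virtualized to 64 bits, so the value is split between the software
* state and the hardware register.
*/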
static int
pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task;
pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned long value, hw_value, ovfl_mask;
unsigned int cnum;
int i, can_access_pmu = 0, state;
int is_counting, is_loaded, is_system, expert_mode;
int ret = -EINVAL;
pfm_reg_check_t wr_func;
state = ctx->ctx_state;
is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
is_system = ctx->ctx_fl_system;
ovfl_mask = pmu_conf->ovfl_val;
task = ctx->ctx_task;
if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
/*
* on both UP and SMP, we can only write to the PMD when the task is
* the owner of the local PMU.
*/
if (likely(is_loaded)) {
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
}
expert_mode = pfm_sysctl.expert_mode;
for (i = 0; i < count; i++, req++) {
cnum = req->reg_num;
value = req->reg_value;
if (!PMD_IS_IMPL(cnum)) {
DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
goto abort_mission;
}
is_counting = PMD_IS_COUNTING(cnum);
wr_func = pmu_conf->pmd_desc[cnum].write_check;
/*
* execute write checker, if any
*/
if (unlikely(expert_mode == 0 && wr_func)) {
unsigned long v = value;
ret = (*wr_func)(task, ctx, cnum, &v, regs);
if (ret) goto abort_mission;
value = v;
ret = -EINVAL;
}
/*
* no error on this register
*/
PFM_REG_RETFLAG_SET(req->reg_flags, 0);
/*
* now commit changes to software state
*/
hw_value = value;
/*
* update virtualized (64bits) counter
*/
if (is_counting) {
/*
* write context state
*/
ctx->ctx_pmds[cnum].lval = value;
/*
* when the context is loaded we use the split value
*/
if (is_loaded) {
hw_value = value & ovfl_mask;
value = value & ~ovfl_mask;
}
}
/*
* update reset values (not just for counters)
*/
ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
/*
* update randomization parameters (not just for counters)
*/
ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
/*
* update context value
*/
ctx->ctx_pmds[cnum].val = value;
/*
* Keep track of what we use
*
* We do not keep track of PMC because we have to
* systematically restore ALL of them.
*/
CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
/*
* mark this PMD register used as well
*/
CTX_USED_PMD(ctx, RDEP(cnum));
/*
* make sure we do not try to reset on
* restart because we have established new values
*/
if (is_counting && state == PFM_CTX_MASKED) {
ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
}
if (is_loaded) {
/*
* write thread state
*/
if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
/*
* write hardware register if we can
*/
if (can_access_pmu) {
ia64_set_pmd(cnum, hw_value);
} else {
#ifdef CONFIG_SMP
/*
* we are guaranteed that the task is not running on the other CPU,
* we indicate that this PMD will need to be reloaded if the task
* is rescheduled on the CPU it ran last on.
*/
ctx->ctx_reload_pmds[0] |= 1UL << cnum;
#endif
}
}
DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
"long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
cnum,
value,
is_loaded,
can_access_pmu,
hw_value,
ctx->ctx_pmds[cnum].val,
ctx->ctx_pmds[cnum].short_reset,
ctx->ctx_pmds[cnum].long_reset,
PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
ctx->ctx_pmds[cnum].seed,
ctx->ctx_pmds[cnum].mask,
ctx->ctx_used_pmds[0],
ctx->ctx_pmds[cnum].reset_pmds[0],
ctx->ctx_reload_pmds[0],
ctx->ctx_all_pmds[0],
ctx->ctx_ovfl_regs[0]));
}
/*
* make changes visible
*/
if (can_access_pmu) ia64_srlz_d();
return 0;
abort_mission:
/*
* for now, we have only one possibility for error
*/
PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
return ret;
}
/*
* By way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
* Therefore we know we do not have to worry about the PMU overflow interrupt. If an
* interrupt is delivered during the call, it will be kept pending until we leave, making
* it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
* guaranteed to return consistent data to the user, it may simply be old. It is not
* trivial to treat the overflow while inside the call because you may end up in
* some module sampling buffer code causing deadlocks.
*/
static int
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task;
unsigned long val = 0UL, lval, ovfl_mask, sval;
pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned int cnum, reg_flags = 0;
int i, can_access_pmu = 0, state;
int is_loaded, is_system, is_counting, expert_mode;
int ret = -EINVAL;
pfm_reg_check_t rd_func;
/*
* access is possible when loaded only for
* self-monitoring tasks or in UP mode
*/
state = ctx->ctx_state;
is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
is_system = ctx->ctx_fl_system;
ovfl_mask = pmu_conf->ovfl_val;
task = ctx->ctx_task;
if (state == PFM_CTX_ZOMBIE) return -EINVAL;
if (likely(is_loaded)) {
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
/*
* this can be true when not self-monitoring only in UP
*/
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
if (can_access_pmu) ia64_srlz_d();
}
expert_mode = pfm_sysctl.expert_mode;
DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
is_loaded,
can_access_pmu,
state));
/*
* on both UP and SMP, we can only read the PMD from the hardware register when
* the task is the owner of the local PMU.
*/
for (i = 0; i < count; i++, req++) {
cnum = req->reg_num;
reg_flags = req->reg_flags;
if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
/*
* we can only read the registers that we use. That includes
* the ones we explicitly initialize AND the ones we want included
* in the sampling buffer (smpl_regs).
*
* Having this restriction allows optimization in the ctxsw routine
* without compromising security (leaks)
*/
if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
sval = ctx->ctx_pmds[cnum].val;
lval = ctx->ctx_pmds[cnum].lval;
is_counting = PMD_IS_COUNTING(cnum);
/*
* If the task is not the current one, then we check if the
* PMU state is still in the local live register due to lazy ctxsw.
* If true, then we read directly from the registers.
*/
if (can_access_pmu){
val = ia64_get_pmd(cnum);
} else {
/*
* context has been saved
* if context is zombie, then task does not exist anymore.
* In this case, we use the full value saved in the context (pfm_flush_regs()).
*/
val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
}
rd_func = pmu_conf->pmd_desc[cnum].read_check;
if (is_counting) {
/*
* XXX: need to check for overflow when loaded
*/
val &= ovfl_mask;
val += sval;
}
/*
* execute read checker, if any
*/
if (unlikely(expert_mode == 0 && rd_func)) {
unsigned long v = val;
ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
if (ret) goto error;
val = v;
ret = -EINVAL;
}
PFM_REG_RETFLAG_SET(reg_flags, 0);
DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
/*
* update register return value, abort all if problem during copy.
* we only modify the reg_flags field. no check mode is fine because
* access has been verified upfront in sys_perfmonctl().
*/
req->reg_value = val;
req->reg_flags = reg_flags;
req->reg_last_reset_val = lval;
}
return 0;
error:
PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
return ret;
}
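/*
 * exported helpers for sampling format modules: they operate on the PMU
 * context owned by the local CPU and forward to the regular
 * pfm_write_pmcs()/pfm_read_pmds() handlers above
 */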
int
pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
pfm_context_t *ctx;
if (req == NULL) return -EINVAL;
ctx = GET_PMU_CTX();
if (ctx == NULL) return -EINVAL;
/*
* for now limit to current task, which is enough when calling
* from overflow handler
*/
if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);
int
pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
pfm_context_t *ctx;
if (req == NULL) return -EINVAL;
ctx = GET_PMU_CTX();
if (ctx == NULL) return -EINVAL;
/*
* for now limit to current task, which is enough when calling
* from overflow handler
*/
if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
/*
* Only call this function when a process is trying to
* write the debug registers (reading is always allowed)
*/
int
pfm_use_debug_registers(struct task_struct *task)
{
pfm_context_t *ctx = task->thread.pfm_context;
unsigned long flags;
int ret = 0;
if (pmu_conf->use_rr_dbregs == 0) return 0;
DPRINT(("called for [%d]\n", task_pid_nr(task)));
/*
* do it only once
*/
if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
/*
* Even on SMP, we do not need to use an atomic here because
* the only way in is via ptrace() and this is possible only when the
* process is stopped. Even in the case where the ctxsw out is not totally
* completed by the time we come here, there is no way the 'stopped' process
* could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
* So this is always safe.
*/
if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
LOCK_PFS(flags);
/*
* We cannot allow setting breakpoints when system wide monitoring
* sessions are using the debug registers.
*/
if (pfm_sessions.pfs_sys_use_dbregs > 0)
ret = -1;
else
pfm_sessions.pfs_ptrace_use_dbregs++;
DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
pfm_sessions.pfs_ptrace_use_dbregs,
pfm_sessions.pfs_sys_use_dbregs,
task_pid_nr(task), ret));
UNLOCK_PFS(flags);
return ret;
}
/*
* This function is called for every task that exits with the
* IA64_THREAD_DBG_VALID set. This indicates a task which was
* able to use the debug registers for debugging purposes via
* ptrace(). Therefore we know it was not using them for
* performance monitoring, so we only decrement the number
* of "ptraced" debug register users to keep the count up to date
*/
int
pfm_release_debug_registers(struct task_struct *task)
{
unsigned long flags;
int ret;
if (pmu_conf->use_rr_dbregs == 0) return 0;
LOCK_PFS(flags);
if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
ret = -1;
} else {
pfm_sessions.pfs_ptrace_use_dbregs--;
ret = 0;
}
UNLOCK_PFS(flags);
return ret;
}
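/*
 * restart command: resume monitoring after an overflow notification.
 * When issued by the monitored task itself (or in system-wide mode) the
 * overflowed PMDs are reset and monitoring is unmasked right here;
 * otherwise we unblock the target task or arm a trap so that it performs
 * the reset itself on its way back to user level.
 */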
static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task;
pfm_buffer_fmt_t *fmt;
pfm_ovfl_ctrl_t rst_ctrl;
int state, is_system;
int ret = 0;
state = ctx->ctx_state;
fmt = ctx->ctx_buf_fmt;
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
switch(state) {
case PFM_CTX_MASKED:
break;
case PFM_CTX_LOADED:
if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
/* fall through */
case PFM_CTX_UNLOADED:
case PFM_CTX_ZOMBIE:
DPRINT(("invalid state=%d\n", state));
return -EBUSY;
default:
DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
return -EINVAL;
}
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
/* sanity check */
if (unlikely(task == NULL)) {
printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
return -EINVAL;
}
if (task == current || is_system) {
fmt = ctx->ctx_buf_fmt;
DPRINT(("restarting self %d ovfl=0x%lx\n",
task_pid_nr(task),
ctx->ctx_ovfl_regs[0]));
if (CTX_HAS_SMPL(ctx)) {
prefetch(ctx->ctx_smpl_hdr);
rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.bits.reset_ovfl_pmds = 0;
if (state == PFM_CTX_LOADED)
ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
else
ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
} else {
rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.bits.reset_ovfl_pmds = 1;
}
if (ret == 0) {
if (rst_ctrl.bits.reset_ovfl_pmds)
pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
if (rst_ctrl.bits.mask_monitoring == 0) {
DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
} else {
DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
// cannot use pfm_stop_monitoring(task, regs);
}
}
/*
* clear overflowed PMD mask to remove any stale information
*/
ctx->ctx_ovfl_regs[0] = 0UL;
/*
* back to LOADED state
*/
ctx->ctx_state = PFM_CTX_LOADED;
/*
* XXX: not really useful for self monitoring
*/
ctx->ctx_fl_can_restart = 0;
return 0;
}
/*
* restart another task
*/
/*
* When PFM_CTX_MASKED, we cannot issue a restart before the previous
* one is seen by the task.
*/
if (state == PFM_CTX_MASKED) {
if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
/*
* will prevent subsequent restart before this one is
* seen by other task
*/
ctx->ctx_fl_can_restart = 0;
}
/*
* if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
* the task is blocked or on its way to block. That's the normal
* restart path. If the monitoring is not masked, then the task
* can be actively monitoring and we cannot directly intervene.
* Therefore we use the trap mechanism to catch the task and
* force it to reset the buffer/reset PMDs.
*
* if non-blocking, then we ensure that the task will go into
* pfm_handle_work() before returning to user mode.
*
* We cannot explicitly reset another task, it MUST always
* be done by the task itself. This works for system wide because
* the tool that is controlling the session is logically doing
* "self-monitoring".
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
complete(&ctx->ctx_restart_done);
} else {
DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
PFM_SET_WORK_PENDING(task, 1);
set_notify_resume(task);
/*
* XXX: send reschedule if task runs on another CPU
*/
}
return 0;
}
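/*
 * debug command: turn perfmon debug output on or off via pfm_sysctl.debug;
 * turning it off also resets the per-CPU statistics
 */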
static int
pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
unsigned int m = *(unsigned int *)arg;
pfm_sysctl.debug = m == 0 ? 0 : 1;
printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
if (m == 0) {
memset(pfm_stats, 0, sizeof(pfm_stats));
for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
}
return 0;
}
/*
* arg can be NULL and count can be zero for this function
*/
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct thread_struct *thread = NULL;
struct task_struct *task;
pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
unsigned long flags;
dbreg_t dbreg;
unsigned int rnum;
int first_time;
int ret = 0, state;
int i, can_access_pmu = 0;
int is_system, is_loaded;
if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
state = ctx->ctx_state;
is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
is_system = ctx->ctx_fl_system;
task = ctx->ctx_task;
if (state == PFM_CTX_ZOMBIE) return -EINVAL;
/*
* on both UP and SMP, we can only write to the PMC when the task is
* the owner of the local PMU.
*/
if (is_loaded) {
thread = &task->thread;
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
}
/*
* we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
* ensuring that no real breakpoint can be installed via this call.
*
* IMPORTANT: regs can be NULL in this function
*/
first_time = ctx->ctx_fl_using_dbreg == 0;
/*
* don't bother if we are loaded and task is being debugged
*/
if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
return -EBUSY;
}
/*
* check for debug registers in system wide mode
*
* Even though a check is done in pfm_context_load(),
* we must repeat it here, in case the registers are
* written after the context is loaded
*/
if (is_loaded) {
LOCK_PFS(flags);
if (first_time && is_system) {
if (pfm_sessions.pfs_ptrace_use_dbregs)
ret = -EBUSY;
else
pfm_sessions.pfs_sys_use_dbregs++;
}
UNLOCK_PFS(flags);
}
if (ret != 0) return ret;
/*
* mark ourself as user of the debug registers for
* perfmon purposes.
*/
ctx->ctx_fl_using_dbreg = 1;
/*
* clear hardware registers to make sure we don't
* pick up stale state.
*
* for a system wide session, we do not use
* thread.dbr, thread.ibr because this process
* never leaves the current CPU and the state
* is shared by all processes running on it
*/
if (first_time && can_access_pmu) {
DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
for (i=0; i < pmu_conf->num_ibrs; i++) {
ia64_set_ibr(i, 0UL);
ia64_dv_serialize_instruction();
}
ia64_srlz_i();
for (i=0; i < pmu_conf->num_dbrs; i++) {
ia64_set_dbr(i, 0UL);
ia64_dv_serialize_data();
}
ia64_srlz_d();
}
/*
* Now install the values into the registers
*/
for (i = 0; i < count; i++, req++) {
rnum = req->dbreg_num;
dbreg.val = req->dbreg_value;
ret = -EINVAL;
if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
rnum, dbreg.val, mode, i, count));
goto abort_mission;
}
/*
* make sure we do not install enabled breakpoint
*/
if (rnum & 0x1) {
if (mode == PFM_CODE_RR)
dbreg.ibr.ibr_x = 0;
else
dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
}
PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
/*
* Debug registers, just like PMC, can only be modified
* by a kernel call. Moreover, perfmon() access to those
* registers are centralized in this routine. The hardware
* does not modify the value of these registers, therefore,
* if we save them as they are written, we can avoid having
* to save them on context switch out. This is made possible
* by the fact that when perfmon uses debug registers, ptrace()
* won't be able to modify them concurrently.
*/
if (mode == PFM_CODE_RR) {
CTX_USED_IBR(ctx, rnum);
if (can_access_pmu) {
ia64_set_ibr(rnum, dbreg.val);
ia64_dv_serialize_instruction();
}
ctx->ctx_ibrs[rnum] = dbreg.val;
DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
} else {
CTX_USED_DBR(ctx, rnum);
if (can_access_pmu) {
ia64_set_dbr(rnum, dbreg.val);
ia64_dv_serialize_data();
}
ctx->ctx_dbrs[rnum] = dbreg.val;
DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
}
}
return 0;
abort_mission:
/*
* in case it was our first attempt, we undo the global modifications
*/
if (first_time) {
LOCK_PFS(flags);
if (ctx->ctx_fl_system) {
pfm_sessions.pfs_sys_use_dbregs--;
}
UNLOCK_PFS(flags);
ctx->ctx_fl_using_dbreg = 0;
}
/*
* install error return flag
*/
PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
return ret;
}
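/*
 * thin wrappers around pfm_write_ibr_dbr(): one for code (IBR) and one
 * for data (DBR) debug registers
 */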
static int
pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
}
static int
pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}
int
pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
pfm_context_t *ctx;
if (req == NULL) return -EINVAL;
ctx = GET_PMU_CTX();
if (ctx == NULL) return -EINVAL;
/*
* for now limit to current task, which is enough when calling
* from overflow handler
*/
if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
return pfm_write_ibrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_ibrs);
int
pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
pfm_context_t *ctx;
if (req == NULL) return -EINVAL;
ctx = GET_PMU_CTX();
if (ctx == NULL) return -EINVAL;
/*
* for now limit to current task, which is enough when calling
* from overflow handler
*/
if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
return pfm_write_dbrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_dbrs);
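/*
 * report the perfmon interface version to user level
 */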
static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
pfarg_features_t *req = (pfarg_features_t *)arg;
req->ft_version = PFM_VERSION;
return 0;
}
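/*
 * stop command: stop monitoring without detaching the context.
 * System-wide mode clears dcr.pp/psr.pp on the monitored CPU, per-task
 * mode clears psr.up (directly or at the next reschedule).
 */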
static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct pt_regs *tregs;
struct task_struct *task = PFM_CTX_TASK(ctx);
int state, is_system;
state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
/*
* context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE)
*/
if (state == PFM_CTX_UNLOADED) return -EINVAL;
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
task_pid_nr(PFM_CTX_TASK(ctx)),
state,
is_system));
/*
* in system mode, we need to update the PMU directly
* and the user level state of the caller, which may not
* necessarily be the creator of the context.
*/
if (is_system) {
/*
* Update local PMU first
*
* disable dcr pp
*/
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
ia64_srlz_i();
/*
* update local cpuinfo
*/
PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
/*
* stop monitoring, does srlz.i
*/
pfm_clear_psr_pp();
/*
* stop monitoring in the caller
*/
ia64_psr(regs)->pp = 0;
return 0;
}
/*
* per-task mode
*/
if (task == current) {
/* stop monitoring at kernel level */
pfm_clear_psr_up();
/*
* stop monitoring at the user level
*/
ia64_psr(regs)->up = 0;
} else {
tregs = task_pt_regs(task);
/*
* stop monitoring at the user level
*/
ia64_psr(tregs)->up = 0;
/*
* monitoring disabled in kernel at next reschedule
*/
ctx->ctx_saved_psr_up = 0;
DPRINT(("task=[%d]\n", task_pid_nr(task)));
}
return 0;
}
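/*
 * start command: (re)activate monitoring for a LOADED context.
 * System-wide mode sets dcr.pp/psr.pp, per-task mode sets psr.up
 * (directly or at the next reschedule).
 */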
static int
pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct pt_regs *tregs;
int state, is_system;
state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
if (state != PFM_CTX_LOADED) return -EINVAL;
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
/*
* in system mode, we need to update the PMU directly
* and the user level state of the caller, which may not
* necessarily be the creator of the context.
*/
if (is_system) {
/*
* set user level psr.pp for the caller
*/
ia64_psr(regs)->pp = 1;
/*
* now update the local PMU and cpuinfo
*/
PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
/*
* start monitoring at kernel level
*/
pfm_set_psr_pp();
/* enable dcr pp */
ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
ia64_srlz_i();
return 0;
}
/*
* per-process mode
*/
if (ctx->ctx_task == current) {
/* start monitoring at kernel level */
pfm_set_psr_up();
/*
* activate monitoring at user level
*/
ia64_psr(regs)->up = 1;
} else {
tregs = task_pt_regs(ctx->ctx_task);
/*
* start monitoring at the kernel level the next
* time the task is scheduled
*/
ctx->ctx_saved_psr_up = IA64_PSR_UP;
/*
* activate monitoring at user level
*/
ia64_psr(tregs)->up = 1;
}
return 0;
}
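/*
 * return the default (reset) value of the requested PMC registers
 */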
static int
pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned int cnum;
int i;
int ret = -EINVAL;
for (i = 0; i < count; i++, req++) {
cnum = req->reg_num;
if (!PMC_IS_IMPL(cnum)) goto abort_mission;
req->reg_value = PMC_DFL_VAL(cnum);
PFM_REG_RETFLAG_SET(req->reg_flags, 0);
DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
}
return 0;
abort_mission:
PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
return ret;
}
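/*
 * walk the task list to verify that some thread still references this
 * context; used by pfm_context_load() to detect the target task exiting
 * underneath us
 */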
static int
pfm_check_task_exist(pfm_context_t *ctx)
{
struct task_struct *g, *t;
int ret = -ESRCH;
read_lock(&tasklist_lock);
do_each_thread (g, t) {
if (t->thread.pfm_context == ctx) {
ret = 0;
goto out;
}
} while_each_thread (g, t);
out:
read_unlock(&tasklist_lock);
DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
return ret;
}
static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task;
struct thread_struct *thread;
struct pfm_context_t *old;
unsigned long flags;
#ifndef CONFIG_SMP
struct task_struct *owner_task = NULL;
#endif
pfarg_load_t *req = (pfarg_load_t *)arg;
unsigned long *pmcs_source, *pmds_source;
int the_cpu;
int ret = 0;
int state, is_system, set_dbregs = 0;
state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
/*
* can only load from unloaded or terminated state
*/
if (state != PFM_CTX_UNLOADED) {
DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
req->load_pid,
ctx->ctx_state));
return -EBUSY;
}
DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
DPRINT(("cannot use blocking mode on self\n"));
return -EINVAL;
}
ret = pfm_get_task(ctx, req->load_pid, &task);
if (ret) {
DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
return ret;
}
ret = -EINVAL;
/*
* system wide is self monitoring only
*/
if (is_system && task != current) {
DPRINT(("system wide is self monitoring only load_pid=%d\n",
req->load_pid));
goto error;
}
thread = &task->thread;
ret = 0;
/*
* cannot load a context which is using range restrictions,
* into a task that is being debugged.
*/
if (ctx->ctx_fl_using_dbreg) {
if (thread->flags & IA64_THREAD_DBG_VALID) {
ret = -EBUSY;
DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
goto error;
}
LOCK_PFS(flags);
if (is_system) {
if (pfm_sessions.pfs_ptrace_use_dbregs) {
DPRINT(("cannot load [%d] dbregs in use\n",
task_pid_nr(task)));
ret = -EBUSY;
} else {
pfm_sessions.pfs_sys_use_dbregs++;
DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
set_dbregs = 1;
}
}
UNLOCK_PFS(flags);
if (ret) goto error;
}
/*
* SMP system-wide monitoring implies self-monitoring.
*
* The programming model expects the task to
* be pinned on a CPU throughout the session.
* Here we take note of the current CPU at the
* time the context is loaded. No call from
* another CPU will be allowed.
*
* The pinning via sched_setaffinity()
* must be done by the calling task prior
* to this call.
*
* systemwide: keep track of CPU this session is supposed to run on
*/
the_cpu = ctx->ctx_cpu = smp_processor_id();
ret = -EBUSY;
/*
* now reserve the session
*/
ret = pfm_reserve_session(current, is_system, the_cpu);
if (ret) goto error;
/*
* task is necessarily stopped at this point.
*
* If the previous context was zombie, then it got removed in
* pfm_save_regs(). Therefore we should not see it here.
* If we see a context, then this is an active context
*
* XXX: needs to be atomic
*/
DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
thread->pfm_context, ctx));
ret = -EBUSY;
old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
if (old != NULL) {
DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
goto error_unres;
}
pfm_reset_msgq(ctx);
ctx->ctx_state = PFM_CTX_LOADED;
/*
* link context to task
*/
ctx->ctx_task = task;
if (is_system) {
/*
* we load as stopped
*/
PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
} else {
thread->flags |= IA64_THREAD_PM_VALID;
}
/*
* propagate into thread-state
*/
pfm_copy_pmds(task, ctx);
pfm_copy_pmcs(task, ctx);
pmcs_source = ctx->th_pmcs;
pmds_source = ctx->th_pmds;
/*
* always the case for system-wide
*/
if (task == current) {
if (is_system == 0) {
/* allow user level control */
ia64_psr(regs)->sp = 0;
DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
SET_LAST_CPU(ctx, smp_processor_id());
INC_ACTIVATION();
SET_ACTIVATION(ctx);
#ifndef CONFIG_SMP
/*
* push the other task out, if any
*/
owner_task = GET_PMU_OWNER();
if (owner_task) pfm_lazy_save_regs(owner_task);
#endif
}
/*
* load all PMD from ctx to PMU (as opposed to thread state)
* restore all PMC from ctx to PMU
*/
pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
ctx->ctx_reload_pmcs[0] = 0UL;
ctx->ctx_reload_pmds[0] = 0UL;
/*
* guaranteed safe by earlier check against DBG_VALID
*/
if (ctx->ctx_fl_using_dbreg) {
pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
}
/*
* set new ownership
*/
SET_PMU_OWNER(task, ctx);
DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
} else {
/*
* when not current, task MUST be stopped, so this is safe
*/
regs = task_pt_regs(task);
/* force a full reload */
ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
SET_LAST_CPU(ctx, -1);
/* initial saved psr (stopped) */
ctx->ctx_saved_psr_up = 0UL;
ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
}
ret = 0;
error_unres:
if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
error:
/*
* we must undo the dbregs setting (for system-wide)
*/
if (ret && set_dbregs) {
LOCK_PFS(flags);
pfm_sessions.pfs_sys_use_dbregs--;
UNLOCK_PFS(flags);
}
/*
* release task, there is now a link with the context
*/
if (is_system == 0 && task != current) {
pfm_put_task(task);
if (ret == 0) {
ret = pfm_check_task_exist(ctx);
if (ret) {
ctx->ctx_state = PFM_CTX_UNLOADED;
ctx->ctx_task = NULL;
}
}
}
return ret;
}
/*
* in this function, we do not need to increase the use count
* for the task via get_task_struct(), because we hold the
* context lock. If the task were to disappear while having
* a context attached, it would go through pfm_exit_thread()
* which also grabs the context lock and would therefore be blocked
* until we are here.
*/
static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task = PFM_CTX_TASK(ctx);
struct pt_regs *tregs;
int prev_state, is_system;
int ret;
DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
prev_state = ctx->ctx_state;
is_system = ctx->ctx_fl_system;
/*
* unload only when necessary
*/
if (prev_state == PFM_CTX_UNLOADED) {
DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
return 0;
}
/*
* clear psr and dcr bits
*/
ret = pfm_stop(ctx, NULL, 0, regs);
if (ret) return ret;
ctx->ctx_state = PFM_CTX_UNLOADED;
/*
* in system mode, we need to update the PMU directly
* and the user level state of the caller, which may not
* necessarily be the creator of the context.
*/
if (is_system) {
/*
* Update cpuinfo
*
* local PMU is taken care of in pfm_stop()
*/
PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
/*
* save PMDs in context
* release ownership
*/
pfm_flush_pmds(current, ctx);
/*
* at this point we are done with the PMU
* so we can unreserve the resource.
*/
if (prev_state != PFM_CTX_ZOMBIE)
pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
/*
* disconnect context from task
*/
task->thread.pfm_context = NULL;
/*
* disconnect task from context
*/
ctx->ctx_task = NULL;
/*
* There is nothing more to cleanup here.
*/
return 0;
}
/*
* per-task mode
*/
tregs = task == current ? regs : task_pt_regs(task);
if (task == current) {
/*
* cancel user level control
*/
ia64_psr(regs)->sp = 1;
DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
}
/*
* save PMDs to context
* release ownership
*/
pfm_flush_pmds(task, ctx);
/*
* at this point we are done with the PMU
* so we can unreserve the resource.
*
* when state was ZOMBIE, we have already unreserved.
*/
if (prev_state != PFM_CTX_ZOMBIE)
pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
/*
* reset activation counter and psr
*/
ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
SET_LAST_CPU(ctx, -1);
/*
* PMU state will not be restored
*/
task->thread.flags &= ~IA64_THREAD_PM_VALID;
/*
* break links between context and task
*/
task->thread.pfm_context = NULL;
ctx->ctx_task = NULL;
PFM_SET_WORK_PENDING(task, 0);
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
ctx->ctx_fl_can_restart = 0;
ctx->ctx_fl_going_zombie = 0;
DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
return 0;
}
/*
* called only from exit_thread(): task == current
* we come here only if current has a context attached (loaded or masked)
*/
void
pfm_exit_thread(struct task_struct *task)
{
pfm_context_t *ctx;
unsigned long flags;
struct pt_regs *regs = task_pt_regs(task);
int ret, state;
int free_ok = 0;
ctx = PFM_GET_CTX(task);
PROTECT_CTX(ctx, flags);
DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
state = ctx->ctx_state;
switch(state) {
case PFM_CTX_UNLOADED:
/*
* only comes to this function if pfm_context is not NULL, i.e., cannot
* be in unloaded state
*/
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
break;
case PFM_CTX_LOADED:
case PFM_CTX_MASKED:
ret = pfm_context_unload(ctx, NULL, 0, regs);
if (ret) {
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
}
DPRINT(("ctx unloaded for current state was %d\n", state));
pfm_end_notify_user(ctx);
break;
case PFM_CTX_ZOMBIE:
ret = pfm_context_unload(ctx, NULL, 0, regs);
if (ret) {
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
}
free_ok = 1;
break;
default:
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
break;
}
UNPROTECT_CTX(ctx, flags);
{ u64 psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(GET_PMU_OWNER());
BUG_ON(ia64_psr(regs)->up);
BUG_ON(ia64_psr(regs)->pp);
}
/*
* All memory free operations (especially for vmalloc'ed memory)
* MUST be done with interrupts ENABLED.
*/
if (free_ok) pfm_context_free(ctx);
}
/*
* functions MUST be listed in the increasing order of their index (see perfmon.h)
*/
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
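/*
 * each descriptor holds: handler, command name, flags, expected argument
 * count, per-argument size and an optional getsize() callback used to
 * compute extra argument bytes (see PFM_CMD above)
 */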
static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0 */PFM_CMD_NONE,
/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6 */PFM_CMD_NONE,
/* 7 */PFM_CMD_NONE,
/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9 */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
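/*
 * verify the monitored task is in a state compatible with the command:
 * commands carrying the STOP flag require the target task to be stopped
 * and context-switched out before its PMU state is touched
 */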
static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
struct task_struct *task;
int state, old_state;
recheck:
state = ctx->ctx_state;
task = ctx->ctx_task;
if (task == NULL) {
DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
return 0;
}
DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
ctx->ctx_fd,
state,
task_pid_nr(task),
task->state, PFM_CMD_STOPPED(cmd)));
/*
* self-monitoring always ok.
*
* for system-wide the caller can either be the creator of the
* context (the one to which the context is attached) OR
* a task running on the same CPU as the session.
*/
if (task == current || ctx->ctx_fl_system) return 0;
/*
* we are monitoring another thread
*/
switch(state) {
case PFM_CTX_UNLOADED:
/*
* if context is UNLOADED we are safe to go
*/
return 0;
case PFM_CTX_ZOMBIE:
/*
* no command can operate on a zombie context
*/
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
return -EINVAL;
case PFM_CTX_MASKED:
/*
* PMU state has been saved to software even though
* the thread may still be running.
*/
if (cmd != PFM_UNLOAD_CONTEXT) return 0;
}
/*
* context is LOADED or MASKED. Some commands may need to have
* the task stopped.
*
* We could lift this restriction for UP but it would mean that
* the user has no guarantee the task would not run between
* two successive calls to perfmonctl(). That's probably OK.
* If this user wants to ensure the task does not run, then
* the task must be stopped.
*/
if (PFM_CMD_STOPPED(cmd)) {
if (!task_is_stopped_or_traced(task)) {
DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
return -EBUSY;
}
/*
* task is now stopped, wait for ctxsw out
*
* This is an interesting point in the code.
* We need to unprotect the context because
* the pfm_save_regs() routine needs to grab
* the same lock. There is danger in doing
* this because it leaves a window open for
* another task to get access to the context
* and possibly change its state. The one thing
* that is not possible is for the context to disappear
* because we are protected by the VFS layer, i.e.,
* get_fd()/put_fd().
*/
old_state = state;
UNPROTECT_CTX(ctx, flags);
wait_task_inactive(task, 0);
PROTECT_CTX(ctx, flags);
/*
* we must recheck to verify if state has changed
*/
if (ctx->ctx_state != old_state) {
DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
goto recheck;
}
}
return 0;
}
/*
* system-call entry point (must return long)
*/
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
struct file *file = NULL;
pfm_context_t *ctx = NULL;
unsigned long flags = 0UL;
void *args_k = NULL;
long ret; /* will expand int return types */
size_t base_sz, sz, xtra_sz = 0;
int narg, completed_args = 0, call_made = 0, cmd_flags;
int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
int (*getsize)(void *arg, size_t *sz);
#define PFM_MAX_ARGSIZE 4096
/*
* reject any call if perfmon was disabled at initialization
*/
if (unlikely(pmu_conf == NULL)) return -ENOSYS;
if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
DPRINT(("invalid cmd=%d\n", cmd));
return -EINVAL;
}
func = pfm_cmd_tab[cmd].cmd_func;
narg = pfm_cmd_tab[cmd].cmd_narg;
base_sz = pfm_cmd_tab[cmd].cmd_argsize;
getsize = pfm_cmd_tab[cmd].cmd_getsize;
cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
if (unlikely(func == NULL)) {
DPRINT(("invalid cmd=%d\n", cmd));
return -EINVAL;
}
DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
PFM_CMD_NAME(cmd),
cmd,
narg,
base_sz,
count));
/*
* check if number of arguments matches what the command expects
*/
if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
return -EINVAL;
restart_args:
sz = xtra_sz + base_sz*count;
/*
* limit abuse to min page size
*/
if (unlikely(sz > PFM_MAX_ARGSIZE)) {
printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
return -E2BIG;
}
/*
* allocate default-sized argument buffer
*/
if (likely(count && args_k == NULL)) {
args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
if (args_k == NULL) return -ENOMEM;
}
ret = -EFAULT;
/*
* copy arguments
*
* assume sz = 0 for command without parameters
*/
if (sz && copy_from_user(args_k, arg, sz)) {
DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
goto error_args;
}
/*
* check if command supports extra parameters
*/
if (completed_args == 0 && getsize) {
/*
* get extra parameters size (based on main argument)
*/
ret = (*getsize)(args_k, &xtra_sz);
if (ret) goto error_args;
completed_args = 1;
DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
/* retry if necessary */
if (likely(xtra_sz)) goto restart_args;
}
if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
ret = -EBADF;
file = fget(fd);
if (unlikely(file == NULL)) {
DPRINT(("invalid fd %d\n", fd));
goto error_args;
}
if (unlikely(PFM_IS_FILE(file) == 0)) {
DPRINT(("fd %d not related to perfmon\n", fd));
goto error_args;
}
ctx = file->private_data;
if (unlikely(ctx == NULL)) {
DPRINT(("no context for fd %d\n", fd));
goto error_args;
}
prefetch(&ctx->ctx_state);
PROTECT_CTX(ctx, flags);
/*
* check task is stopped
*/
ret = pfm_check_task_state(ctx, cmd, flags);
if (unlikely(ret)) goto abort_locked;
skip_fd:
ret = (*func)(ctx, args_k, count, task_pt_regs(current));
call_made = 1;
abort_locked:
if (likely(ctx)) {
DPRINT(("context unlocked\n"));
UNPROTECT_CTX(ctx, flags);
}
/* copy argument back to user, if needed */
if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
error_args:
if (file)
fput(file);
kfree(args_k);
DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
return ret;
}
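/*
 * common resume path used after an overflow notification has been
 * processed: let the sampling format (if any) decide, reset the
 * overflowed PMDs if requested and unmask monitoring
 */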
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
pfm_ovfl_ctrl_t rst_ctrl;
int state;
int ret = 0;
state = ctx->ctx_state;
/*
* Unlock sampling buffer and reset index atomically
* XXX: not really needed when blocking
*/
if (CTX_HAS_SMPL(ctx)) {
rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.bits.reset_ovfl_pmds = 0;
if (state == PFM_CTX_LOADED)
ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
else
ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
} else {
rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.bits.reset_ovfl_pmds = 1;
}
if (ret == 0) {
if (rst_ctrl.bits.reset_ovfl_pmds) {
pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
}
if (rst_ctrl.bits.mask_monitoring == 0) {
DPRINT(("resuming monitoring\n"));
if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
} else {
DPRINT(("stopping monitoring\n"));
//pfm_stop_monitoring(current, regs);
}
ctx->ctx_state = PFM_CTX_LOADED;
}
}
/*
* context MUST BE LOCKED when calling
* can only be called for current
*/
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
int ret;
DPRINT(("entering for [%d]\n", task_pid_nr(current)));
ret = pfm_context_unload(ctx, NULL, 0, regs);
if (ret) {
printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
}
/*
* and wakeup controlling task, indicating we are now disconnected
*/
wake_up_interruptible(&ctx->ctx_zombieq);
/*
* given that context is still locked, the controlling
* task will only get access when we return from
* pfm_handle_work().
*/
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
/*
* pfm_handle_work() can be called with interrupts enabled
* (TIF_NEED_RESCHED) or disabled. The wait_for_completion_interruptible()
* call may sleep, therefore we must re-enable interrupts
* to avoid deadlocks. It is safe to do so because this function
* is called ONLY when returning to user level (pUStk=1), in which case
* there is no risk of kernel stack overflow due to deep
* interrupt nesting.
*/
void
pfm_handle_work(void)
{
pfm_context_t *ctx;
struct pt_regs *regs;
unsigned long flags, dummy_flags;
unsigned long ovfl_regs;
unsigned int reason;
int ret;
ctx = PFM_GET_CTX(current);
if (ctx == NULL) {
printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
task_pid_nr(current));
return;
}
PROTECT_CTX(ctx, flags);
PFM_SET_WORK_PENDING(current, 0);
regs = task_pt_regs(current);
/*
* extract reason for being here and clear
*/
reason = ctx->ctx_fl_trap_reason;
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
ovfl_regs = ctx->ctx_ovfl_regs[0];
DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
/*
* must be done before we check for simple-reset mode
*/
if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
goto do_zombie;
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
if (reason == PFM_TRAP_REASON_RESET)
goto skip_blocking;
/*
* restore interrupt mask to what it was on entry.
* Could be enabled/disabled.
*/
UNPROTECT_CTX(ctx, flags);
/*
* force interrupts enabled because wait_for_completion_interruptible() may sleep
*/
local_irq_enable();
DPRINT(("before block sleeping\n"));
/*
* may go through without blocking on SMP systems
* if restart has been received already by the time we block
*/
ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
DPRINT(("after block sleeping ret=%d\n", ret));
/*
* lock context and mask interrupts again
* We save flags into a dummy because we may have
* altered the interrupt mask compared to entry into this
* function.
*/
PROTECT_CTX(ctx, dummy_flags);
/*
* we need to read the ovfl_regs only after wake-up
* because we may have had pfm_write_pmds() in between
* and that can have changed PMD values and therefore
* ovfl_regs is reset for these new PMD values.
*/
ovfl_regs = ctx->ctx_ovfl_regs[0];
if (ctx->ctx_fl_going_zombie) {
do_zombie:
DPRINT(("context is zombie, bailing out\n"));
pfm_context_force_terminate(ctx, regs);
goto nothing_to_do;
}
/*
* in case the blocking wait was interrupted we don't restart anything
*/
if (ret < 0)
goto nothing_to_do;
skip_blocking:
pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
ctx->ctx_ovfl_regs[0] = 0UL;
nothing_to_do:
/*
* restore flags as they were upon entry
*/
UNPROTECT_CTX(ctx, flags);
}
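/*
 * wake up a reader blocked on the context message queue and send SIGIO
 * to async listeners; notifications are dropped for zombie contexts
 */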
static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
DPRINT(("ignoring overflow notification, owner is zombie\n"));
return 0;
}
DPRINT(("waking up somebody\n"));
if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
/*
* safe, we are not in intr handler, nor in ctxsw when
* we come here
*/
kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
return 0;
}
static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
pfm_msg_t *msg = NULL;
if (ctx->ctx_fl_no_msg == 0) {
msg = pfm_get_new_msg(ctx);
if (msg == NULL) {
printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
return -1;
}
msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
msg->pfm_ovfl_msg.msg_active_set = 0;
msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
msg->pfm_ovfl_msg.msg_tstamp = 0UL;
}
DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
msg,
ctx->ctx_fl_no_msg,
ctx->ctx_fd,
ovfl_pmds));
return pfm_notify_user(ctx, msg);
}
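/*
 * queue an end-of-session message and notify the monitoring task
 * (used when the monitored thread exits)
 */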
static int
pfm_end_notify_user(pfm_context_t *ctx)
{
pfm_msg_t *msg;
msg = pfm_get_new_msg(ctx);
if (msg == NULL) {
printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
return -1;
}
/* no leak */
memset(msg, 0, sizeof(*msg));
msg->pfm_end_msg.msg_type = PFM_MSG_END;
msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
msg->pfm_ovfl_msg.msg_tstamp = 0UL;
DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
msg,
ctx->ctx_fl_no_msg,
ctx->ctx_fd));
return pfm_notify_user(ctx, msg);
}
/*
* main overflow processing routine.
* it can be called from the interrupt path or explicitly during the context switch code
*/
static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
unsigned long pmc0, struct pt_regs *regs)
{
pfm_ovfl_arg_t *ovfl_arg;
unsigned long mask;
unsigned long old_val, ovfl_val, new_val;
unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
unsigned long tstamp;
pfm_ovfl_ctrl_t ovfl_ctrl;
unsigned int i, has_smpl;
int must_notify = 0;
if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
/*
* sanity test. Should never happen
*/
if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
tstamp = ia64_get_itc();
mask = pmc0 >> PMU_FIRST_COUNTER;
ovfl_val = pmu_conf->ovfl_val;
has_smpl = CTX_HAS_SMPL(ctx);
DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
"used_pmds=0x%lx\n",
pmc0,
task ? task_pid_nr(task): -1,
(regs ? regs->cr_iip : 0),
CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
ctx->ctx_used_pmds[0]));
/*
* first we update the virtual counters
* assume there was a prior ia64_srlz_d() issued
*/
for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
/* skip pmd which did not overflow */
if ((mask & 0x1) == 0) continue;
/*
* Note that the pmd is not necessarily 0 at this point as qualified events
* may have happened before the PMU was frozen. The residual count is not
* taken into consideration here but will be with any read of the pmd via
* pfm_read_pmds().
*/
old_val = new_val = ctx->ctx_pmds[i].val;
new_val += 1 + ovfl_val;
ctx->ctx_pmds[i].val = new_val;
/*
* check for overflow condition
*/
if (likely(old_val > new_val)) {
ovfl_pmds |= 1UL << i;
if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
}
DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
i,
new_val,
old_val,
ia64_get_pmd(i) & ovfl_val,
ovfl_pmds,
ovfl_notify));
}
/*
* there was no 64-bit overflow, nothing else to do
*/
if (ovfl_pmds == 0UL) return;
/*
* reset all control bits
*/
ovfl_ctrl.val = 0;
reset_pmds = 0UL;
/*
* if a sampling format module exists, then we "cache" the overflow by
* calling the module's handler() routine.
*/
if (has_smpl) {
unsigned long start_cycles, end_cycles;
unsigned long pmd_mask;
int j, k, ret = 0;
int this_cpu = smp_processor_id();
pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
ovfl_arg = &ctx->ctx_ovfl_arg;
prefetch(ctx->ctx_smpl_hdr);
for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
mask = 1UL << i;
if ((pmd_mask & 0x1) == 0) continue;
ovfl_arg->ovfl_pmd = (unsigned char )i;
ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
ovfl_arg->active_set = 0;
ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
/*
* copy values of pmds of interest. Sampling format may copy them
* into sampling buffer.
*/
if (smpl_pmds) {
for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
if ((smpl_pmds & 0x1) == 0) continue;
ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
}
}
pfm_stats[this_cpu].pfm_smpl_handler_calls++;
start_cycles = ia64_get_itc();
/*
* call custom buffer format record (handler) routine
*/
ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
end_cycles = ia64_get_itc();
/*
* For those controls, we take the union because they have
* an all or nothing behavior.
*/
ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
/*
* build the bitmask of pmds to reset now
*/
if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
}
/*
* when the module cannot handle the rest of the overflows, we abort right here
*/
if (ret && pmd_mask) {
DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
pmd_mask<<PMU_FIRST_COUNTER));
}
/*
* remove the pmds we reset now from the set of pmds to reset in pfm_restart()
*/
ovfl_pmds &= ~reset_pmds;
} else {
/*
* when no sampling module is used, then the default
* is to notify on overflow if requested by user
*/
ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
/*
* if needed, we reset all overflowed pmds
*/
if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
}
DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
/*
* reset the requested PMD registers using the short reset values
*/
if (reset_pmds) {
unsigned long bm = reset_pmds;
pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
}
if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
/*
* keep track of what to reset when unblocking
*/
ctx->ctx_ovfl_regs[0] = ovfl_pmds;
/*
* check for blocking context
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
/*
* set the perfmon specific checking pending work for the task
*/
PFM_SET_WORK_PENDING(task, 1);
/*
* when coming from ctxsw, current still points to the
* previous task, therefore we must work with task and not current.
*/
set_notify_resume(task);
}
/*
* defer until the state is changed (shortens the spin window). The context is locked
* anyway, so the signal receiver would just come and spin for nothing.
*/
must_notify = 1;
}
DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
PFM_GET_WORK_PENDING(task),
ctx->ctx_fl_trap_reason,
ovfl_pmds,
ovfl_notify,
ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
/*
* in case monitoring must be stopped, we toggle the psr bits
*/
if (ovfl_ctrl.bits.mask_monitoring) {
pfm_mask_monitoring(task);
ctx->ctx_state = PFM_CTX_MASKED;
ctx->ctx_fl_can_restart = 1;
}
/*
* send notification now
*/
if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
return;
sanity_check:
printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
smp_processor_id(),
task ? task_pid_nr(task) : -1,
pmc0);
return;
stop_monitoring:
/*
* in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
* Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
* come here as zombie only if the task is the current task. In which case, we
* can access the PMU hardware directly.
*
* Note that zombies do have PM_VALID set. So here we do the minimal.
*
* In case the context was zombified it could not be reclaimed at the time
* the monitoring program exited. At this point, the PMU reservation has been
* returned, the sampling buffer has been freed. We must convert this call
* into a spurious interrupt. However, we must also avoid infinite overflows
* by stopping monitoring for this task. We can only come here for a per-task
* context. All we need to do is to stop monitoring using the psr bits which
* are always task private. By re-enabling secure monitoring, we ensure that
* the monitored task will not be able to re-activate monitoring.
* The task will eventually be context switched out, at which point the context
* will be reclaimed (that includes releasing ownership of the PMU).
*
* So there might be a window of time where the number of per-task sessions is zero
* yet one PMU might have an owner and get at most one overflow interrupt for a zombie
* context. This is safe because if a per-task session comes in, it will push this one
* out and, by virtue of pfm_save_regs(), this one will disappear. If a system wide
* session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
* also push our zombie context out.
*
* Overall pretty hairy stuff....
*/
DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
pfm_clear_psr_up();
ia64_psr(regs)->up = 0;
ia64_psr(regs)->sp = 1;
return;
}
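/*
 * low-level overflow interrupt processing: read pmc0, locate the PMU
 * owner and context on this CPU and hand off to pfm_overflow_handler().
 * Returns 0 if handled, -1 for a spurious interrupt.
 */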
static int
pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
{
struct task_struct *task;
pfm_context_t *ctx;
unsigned long flags;
u64 pmc0;
int this_cpu = smp_processor_id();
int retval = 0;
pfm_stats[this_cpu].pfm_ovfl_intr_count++;
/*
* srlz.d done before arriving here
*/
pmc0 = ia64_get_pmc(0);
task = GET_PMU_OWNER();
ctx = GET_PMU_CTX();
/*
* if we have some pending bits set
* assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
*/
if (PMC0_HAS_OVFL(pmc0) && task) {
/*
* we assume that pmc0.fr is always set here
*/
/* sanity check */
if (!ctx) goto report_spurious1;
if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
goto report_spurious2;
PROTECT_CTX_NOPRINT(ctx, flags);
pfm_overflow_handler(task, ctx, pmc0, regs);
UNPROTECT_CTX_NOPRINT(ctx, flags);
} else {
pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
retval = -1;
}
/*
* keep it unfrozen at all times
*/
pfm_unfreeze_pmu();
return retval;
report_spurious1:
printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
this_cpu, task_pid_nr(task));
pfm_unfreeze_pmu();
return -1;
report_spurious2:
printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
this_cpu,
task_pid_nr(task));
pfm_unfreeze_pmu();
return -1;
}
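/*
 * PMU interrupt entry point: wraps pfm_do_interrupt_handler() with cycle
 * accounting for /proc/perfmon, or dispatches to an alternate handler
 * when one has been installed
 */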
static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
unsigned long start_cycles, total_cycles;
unsigned long min, max;
int this_cpu;
int ret;
struct pt_regs *regs = get_irq_regs();
this_cpu = get_cpu();
if (likely(!pfm_alt_intr_handler)) {
min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
start_cycles = ia64_get_itc();
ret = pfm_do_interrupt_handler(arg, regs);
total_cycles = ia64_get_itc();
/*
* don't measure spurious interrupts
*/
if (likely(ret == 0)) {
total_cycles -= start_cycles;
if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
}
}
else {
(*pfm_alt_intr_handler->handler)(irq, arg, regs);
}
put_cpu();
return IRQ_HANDLED;
}
/*
* /proc/perfmon interface, for debug only
*/
#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
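/*
 * seq_file iterator: position 0 yields the header, position n
 * (1..nr_cpu_ids) yields per-CPU statistics for online CPU n-1
 */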
static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0) {
return PFM_PROC_SHOW_HEADER;
}
while (*pos <= nr_cpu_ids) {
if (cpu_online(*pos - 1)) {
return (void *)*pos;
}
++*pos;
}
return NULL;
}
static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return pfm_proc_start(m, pos);
}
static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}
static void
pfm_proc_show_header(struct seq_file *m)
{
struct list_head * pos;
pfm_buffer_fmt_t * entry;
unsigned long flags;
seq_printf(m,
"perfmon version : %u.%u\n"
"model : %s\n"
"fastctxsw : %s\n"
"expert mode : %s\n"
"ovfl_mask : 0x%lx\n"
"PMU flags : 0x%x\n",
PFM_VERSION_MAJ, PFM_VERSION_MIN,
pmu_conf->pmu_name,
pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
pfm_sysctl.expert_mode > 0 ? "Yes": "No",
pmu_conf->ovfl_val,
pmu_conf->flags);
LOCK_PFS(flags);
seq_printf(m,
"proc_sessions : %u\n"
"sys_sessions : %u\n"
"sys_use_dbregs : %u\n"
"ptrace_use_dbregs : %u\n",
pfm_sessions.pfs_task_sessions,
pfm_sessions.pfs_sys_sessions,
pfm_sessions.pfs_sys_use_dbregs,
pfm_sessions.pfs_ptrace_use_dbregs);
UNLOCK_PFS(flags);
spin_lock(&pfm_buffer_fmt_lock);
list_for_each(pos, &pfm_buffer_fmt_list) {
entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
entry->fmt_uuid[0],
entry->fmt_uuid[1],
entry->fmt_uuid[2],
entry->fmt_uuid[3],
entry->fmt_uuid[4],
entry->fmt_uuid[5],
entry->fmt_uuid[6],
entry->fmt_uuid[7],
entry->fmt_uuid[8],
entry->fmt_uuid[9],
entry->fmt_uuid[10],
entry->fmt_uuid[11],
entry->fmt_uuid[12],
entry->fmt_uuid[13],
entry->fmt_uuid[14],
entry->fmt_uuid[15],
entry->fmt_name);
}
spin_unlock(&pfm_buffer_fmt_lock);
}
static int
pfm_proc_show(struct seq_file *m, void *v)
{
unsigned long psr;
unsigned int i;
int cpu;
if (v == PFM_PROC_SHOW_HEADER) {
pfm_proc_show_header(m);
return 0;
}
/* show info for CPU (v - 1) */
cpu = (long)v - 1;
seq_printf(m,
"CPU%-2d overflow intrs : %lu\n"
"CPU%-2d overflow cycles : %lu\n"
"CPU%-2d overflow min : %lu\n"
"CPU%-2d overflow max : %lu\n"
"CPU%-2d smpl handler calls : %lu\n"
"CPU%-2d smpl handler cycles : %lu\n"
"CPU%-2d spurious intrs : %lu\n"
"CPU%-2d replay intrs : %lu\n"
"CPU%-2d syst_wide : %d\n"
"CPU%-2d dcr_pp : %d\n"
"CPU%-2d exclude idle : %d\n"
"CPU%-2d owner : %d\n"
"CPU%-2d context : %p\n"
"CPU%-2d activations : %lu\n",
cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
cpu, pfm_get_cpu_data(pmu_ctx, cpu),
cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
psr = pfm_get_psr();
ia64_srlz_d();
seq_printf(m,
"CPU%-2d psr : 0x%lx\n"
"CPU%-2d pmc0 : 0x%lx\n",
cpu, psr,
cpu, ia64_get_pmc(0));
for (i=0; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_COUNTING(i) == 0) continue;
seq_printf(m,
"CPU%-2d pmc%u : 0x%lx\n"
"CPU%-2d pmd%u : 0x%lx\n",
cpu, i, ia64_get_pmc(i),
cpu, i, ia64_get_pmd(i));
}
}
return 0;
}
const struct seq_operations pfm_seq_ops = {
.start = pfm_proc_start,
.next = pfm_proc_next,
.stop = pfm_proc_stop,
.show = pfm_proc_show
};
static int
pfm_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &pfm_seq_ops);
}
/*
* we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
* during pfm_enable() hence before pfm_start(). We cannot assume monitoring
* is active or inactive based on mode. We must rely on the value in
* local_cpu_data->pfm_syst_info
*/
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
struct pt_regs *regs;
unsigned long dcr;
unsigned long dcr_pp;
dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
/*
* pid 0 is guaranteed to be the idle task. There is one such task with pid 0
* on every CPU, so we can rely on the pid to identify the idle task.
*/
if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
regs = task_pt_regs(task);
ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
return;
}
/*
* if monitoring has started
*/
if (dcr_pp) {
dcr = ia64_getreg(_IA64_REG_CR_DCR);
/*
* context switching in?
*/
if (is_ctxswin) {
/* mask monitoring for the idle task */
ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
pfm_clear_psr_pp();
ia64_srlz_i();
return;
}
/*
* context switching out
* restore monitoring for next task
*
* Due to inlining this odd if-then-else construction generates
* better code.
*/
ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
pfm_set_psr_pp();
ia64_srlz_i();
}
}
#ifdef CONFIG_SMP
static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
struct task_struct *task = ctx->ctx_task;
ia64_psr(regs)->up = 0;
ia64_psr(regs)->sp = 1;
if (GET_PMU_OWNER() == task) {
DPRINT(("cleared ownership for [%d]\n",
task_pid_nr(ctx->ctx_task)));
SET_PMU_OWNER(NULL, NULL);
}
/*
* disconnect the task from the context and vice-versa
*/
PFM_SET_WORK_PENDING(task, 0);
task->thread.pfm_context = NULL;
task->thread.flags &= ~IA64_THREAD_PM_VALID;
DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
}
/*
* in 2.6, interrupts are masked when we come here and the runqueue lock is held
*/
void
pfm_save_regs(struct task_struct *task)
{
pfm_context_t *ctx;
unsigned long flags;
u64 psr;
ctx = PFM_GET_CTX(task);
if (ctx == NULL) return;
/*
* we always come here with interrupts ALREADY disabled by
* the scheduler. So we simply need to protect against concurrent
* access, not CPU concurrency.
*/
flags = pfm_protect_ctx_ctxsw(ctx);
if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
struct pt_regs *regs = task_pt_regs(task);
pfm_clear_psr_up();
pfm_force_cleanup(ctx, regs);
BUG_ON(ctx->ctx_smpl_hdr);
pfm_unprotect_ctx_ctxsw(ctx, flags);
pfm_context_free(ctx);
return;
}
/*
* save current PSR: needed because we modify it
*/
ia64_srlz_d();
psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_I));
/*
* stop monitoring:
* This is the last instruction which may generate an overflow
*
* We do not need to set psr.sp because it is irrelevant in the kernel.
* It will be restored from ipsr when going back to user level
*/
pfm_clear_psr_up();
/*
* keep a copy of psr.up (for reload)
*/
ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
/*
* release ownership of this PMU.
* PM interrupts are masked, so nothing
* can happen.
*/
SET_PMU_OWNER(NULL, NULL);
/*
* we systematically save the PMD as we have no
* guarantee we will be scheduled on that same
* CPU again.
*/
pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
/*
* save pmc0 ia64_srlz_d() done in pfm_save_pmds()
* we will need it on the restore path to check
* for pending overflow.
*/
ctx->th_pmcs[0] = ia64_get_pmc(0);
/*
* unfreeze the PMU if it had pending overflows
*/
if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
/*
* finally, allow context access.
* interrupts will still be masked after this call.
*/
pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
pfm_context_t *ctx;
u64 psr;
ctx = PFM_GET_CTX(task);
if (ctx == NULL) return;
/*
* save current PSR: needed because we modify it
*/
psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_I));
/*
* stop monitoring:
* This is the last instruction which may generate an overflow
*
* We do not need to set psr.sp because it is irrelevant in the kernel.
* It will be restored from ipsr when going back to user level
*/
pfm_clear_psr_up();
/*
* keep a copy of psr.up (for reload)
*/
ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}
static void
pfm_lazy_save_regs (struct task_struct *task)
{
pfm_context_t *ctx;
unsigned long flags;
{ u64 psr = pfm_get_psr();
BUG_ON(psr & IA64_PSR_UP);
}
ctx = PFM_GET_CTX(task);
/*
* we need to mask PMU overflow here to
* make sure that we maintain pmc0 until
* we save it. overflow interrupts are
* treated as spurious if there is no
* owner.
*
* XXX: I don't think this is necessary
*/
PROTECT_CTX(ctx,flags);
/*
* release ownership of this PMU.
* must be done before we save the registers.
*
* after this call any PMU interrupt is treated
* as spurious.
*/
SET_PMU_OWNER(NULL, NULL);
/*
* save all the pmds we use
*/
pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
/*
* save pmc0 ia64_srlz_d() done in pfm_save_pmds()
* it is needed to check for pending overflow
* on the restore path
*/
ctx->th_pmcs[0] = ia64_get_pmc(0);
/*
* unfreeze the PMU if it had pending overflows
*/
if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
/*
* now we can unmask PMU interrupts, they will
* be treated as purely spurious and we will not
* lose any information
*/
UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
* in 2.6, interrupts are masked when we come here and the runqueue lock is held
*/
void
pfm_load_regs (struct task_struct *task)
{
pfm_context_t *ctx;
unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
unsigned long flags;
u64 psr, psr_up;
int need_irq_resend;
ctx = PFM_GET_CTX(task);
if (unlikely(ctx == NULL)) return;
BUG_ON(GET_PMU_OWNER());
/*
* possible on unload
*/
if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
/*
* we always come here with interrupts ALREADY disabled by
* the scheduler. So we simply need to protect against concurrent
* access, not CPU concurrency.
*/
flags = pfm_protect_ctx_ctxsw(ctx);
psr = pfm_get_psr();
need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
struct pt_regs *regs = task_pt_regs(task);
BUG_ON(ctx->ctx_smpl_hdr);
pfm_force_cleanup(ctx, regs);
pfm_unprotect_ctx_ctxsw(ctx, flags);
/*
* this one (kmalloc'ed) is fine with interrupts disabled
*/
pfm_context_free(ctx);
return;
}
/*
* we restore ALL the debug registers to avoid picking up
* stale state.
*/
if (ctx->ctx_fl_using_dbreg) {
pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
}
/*
* retrieve saved psr.up
*/
psr_up = ctx->ctx_saved_psr_up;
/*
* if we were the last user of the PMU on that CPU,
* then nothing to do except restore psr
*/
if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
/*
* retrieve partial reload masks (due to user modifications)
*/
pmc_mask = ctx->ctx_reload_pmcs[0];
pmd_mask = ctx->ctx_reload_pmds[0];
} else {
/*
* To avoid leaking information to the user level when psr.sp=0,
* we must reload ALL implemented pmds (even the ones we don't use).
* In the kernel we only allow PFM_READ_PMDS on registers which
* we initialized or requested (sampling) so there is no risk there.
*/
pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
/*
* ALL accessible PMCs are systematically reloaded, unused registers
* get their default (from pfm_reset_pmu_state()) values to avoid picking
* up stale configuration.
*
* PMC0 is never in the mask. It is always restored separately.
*/
pmc_mask = ctx->ctx_all_pmcs[0];
}
/*
* when context is MASKED, we will restore PMC with plm=0
* and PMD with stale information, but that's ok, nothing
* will be captured.
*
* XXX: optimize here
*/
if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
/*
* check for pending overflow at the time the state
* was saved.
*/
if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
/*
* reload pmc0 with the overflow information
* On McKinley PMU, this will trigger a PMU interrupt
*/
ia64_set_pmc(0, ctx->th_pmcs[0]);
ia64_srlz_d();
ctx->th_pmcs[0] = 0UL;
/*
* will replay the PMU interrupt
*/
if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
}
/*
* we just did a reload, so we reset the partial reload fields
*/
ctx->ctx_reload_pmcs[0] = 0UL;
ctx->ctx_reload_pmds[0] = 0UL;
SET_LAST_CPU(ctx, smp_processor_id());
/*
* bump the activation value for this PMU
*/
INC_ACTIVATION();
/*
* record current activation for this context
*/
SET_ACTIVATION(ctx);
/*
* establish new ownership.
*/
SET_PMU_OWNER(task, ctx);
/*
* restore the psr.up bit. measurement
* is active again.
* no PMU interrupt can happen at this point
* because we still have interrupts disabled.
*/
if (likely(psr_up)) pfm_set_psr_up();
/*
* allow concurrent access to context
*/
pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
/*
* reload PMU state for UP kernels
* in 2.5 we come here with interrupts disabled
*/
void
pfm_load_regs (struct task_struct *task)
{
pfm_context_t *ctx;
struct task_struct *owner;
unsigned long pmd_mask, pmc_mask;
u64 psr, psr_up;
int need_irq_resend;
owner = GET_PMU_OWNER();
ctx = PFM_GET_CTX(task);
psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
/*
* we restore ALL the debug registers to avoid picking up
* stale state.
*
* This must be done even when the task is still the owner
* as the registers may have been modified via ptrace()
* (not perfmon) by the previous task.
*/
if (ctx->ctx_fl_using_dbreg) {
pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
}
/*
* retrieve saved psr.up
*/
psr_up = ctx->ctx_saved_psr_up;
need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
/*
* short path, our state is still there, just
* need to restore psr and we go
*
* we do not touch either PMC nor PMD. the psr is not touched
* by the overflow_handler. So we are safe w.r.t. to interrupt
* concurrency even without interrupt masking.
*/
if (likely(owner == task)) {
if (likely(psr_up)) pfm_set_psr_up();
return;
}
/*
* someone else is still using the PMU, first push it out and
* then we'll be able to install our stuff !
*
* Upon return, there will be no owner for the current PMU
*/
if (owner) pfm_lazy_save_regs(owner);
/*
* To avoid leaking information to the user level when psr.sp=0,
* we must reload ALL implemented pmds (even the ones we don't use).
* In the kernel we only allow PFM_READ_PMDS on registers which
* we initialized or requested (sampling) so there is no risk there.
*/
pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
/*
* ALL accessible PMCs are systematically reloaded, unused registers
* get their default (from pfm_reset_pmu_state()) values to avoid picking
* up stale configuration.
*
* PMC0 is never in the mask. It is always restored separately
*/
pmc_mask = ctx->ctx_all_pmcs[0];
pfm_restore_pmds(ctx->th_pmds, pmd_mask);
pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
/*
* check for pending overflow at the time the state
* was saved.
*/
if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
/*
* reload pmc0 with the overflow information
* On McKinley PMU, this will trigger a PMU interrupt
*/
ia64_set_pmc(0, ctx->th_pmcs[0]);
ia64_srlz_d();
ctx->th_pmcs[0] = 0UL;
/*
* will replay the PMU interrupt
*/
if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
}
/*
* establish new ownership.
*/
SET_PMU_OWNER(task, ctx);
/*
* restore the psr.up bit. measurement
* is active again.
* no PMU interrupt can happen at this point
* because we still have interrupts disabled.
*/
if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
/*
* this function assumes monitoring is stopped
*/
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
u64 pmc0;
unsigned long mask2, val, pmd_val, ovfl_val;
int i, can_access_pmu = 0;
int is_self;
/*
* is the caller the task being monitored (or which initiated the
* session for system wide measurements)
*/
is_self = ctx->ctx_task == task ? 1 : 0;
/*
* can access PMU if task is the owner of the PMU state on the current CPU
* or if we are running on the CPU bound to the context in system-wide mode
* (that is not necessarily the task the context is attached to in this mode).
* In system-wide we always have can_access_pmu true because a task running on an
* invalid processor is flagged earlier in the call stack (see pfm_stop).
*/
can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
if (can_access_pmu) {
/*
* Mark the PMU as not owned
* This will cause the interrupt handler to do nothing in case an overflow
* interrupt was in-flight
* This also guarantees that pmc0 will contain the final state
* It virtually gives us full control on overflow processing from that point
* on.
*/
SET_PMU_OWNER(NULL, NULL);
DPRINT(("releasing ownership\n"));
/*
* read current overflow status:
*
* we are guaranteed to read the final stable state
*/
ia64_srlz_d();
pmc0 = ia64_get_pmc(0); /* slow */
/*
* reset freeze bit, overflow status information destroyed
*/
pfm_unfreeze_pmu();
} else {
pmc0 = ctx->th_pmcs[0];
/*
* clear whatever overflow status bits there were
*/
ctx->th_pmcs[0] = 0;
}
ovfl_val = pmu_conf->ovfl_val;
/*
* we save all the used pmds
* we take care of overflows for counting PMDs
*
* XXX: sampling situation is not taken into account here
*/
mask2 = ctx->ctx_used_pmds[0];
DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
for (i = 0; mask2; i++, mask2>>=1) {
/* skip non used pmds */
if ((mask2 & 0x1) == 0) continue;
/*
* can access PMU always true in system wide mode
*/
val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
if (PMD_IS_COUNTING(i)) {
DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
task_pid_nr(task),
i,
ctx->ctx_pmds[i].val,
val & ovfl_val));
/*
* we rebuild the full 64 bit value of the counter
*/
val = ctx->ctx_pmds[i].val + (val & ovfl_val);
/*
* now everything is in ctx_pmds[] and we need
* to clear the saved context from save_regs() such that
* pfm_read_pmds() gets the correct value
*/
pmd_val = 0UL;
/*
* take care of overflow inline
*/
if (pmc0 & (1UL << i)) {
val += 1 + ovfl_val;
DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
}
}
DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
if (is_self) ctx->th_pmds[i] = pmd_val;
ctx->ctx_pmds[i].val = val;
}
}
static struct irqaction perfmon_irqaction = {
.handler = pfm_interrupt_handler,
.flags = IRQF_DISABLED,
.name = "perfmon"
};
static void
pfm_alt_save_pmu_state(void *data)
{
struct pt_regs *regs;
regs = task_pt_regs(current);
DPRINT(("called\n"));
/*
* should not be necessary but
* let's not take any risk
*/
pfm_clear_psr_up();
pfm_clear_psr_pp();
ia64_psr(regs)->pp = 0;
/*
* This call is required
* May cause a spurious interrupt on some processors
*/
pfm_freeze_pmu();
ia64_srlz_d();
}
void
pfm_alt_restore_pmu_state(void *data)
{
struct pt_regs *regs;
regs = task_pt_regs(current);
DPRINT(("called\n"));
/*
* put PMU back in state expected
* by perfmon
*/
pfm_clear_psr_up();
pfm_clear_psr_pp();
ia64_psr(regs)->pp = 0;
/*
* perfmon runs with PMU unfrozen at all times
*/
pfm_unfreeze_pmu();
ia64_srlz_d();
}
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
int ret, i;
int reserve_cpu;
/* some sanity checks */
if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
/* do the easy test first */
if (pfm_alt_intr_handler) return -EBUSY;
/* one at a time in the install or remove, just fail the others */
if (!spin_trylock(&pfm_alt_install_check)) {
return -EBUSY;
}
/* reserve our session */
for_each_online_cpu(reserve_cpu) {
ret = pfm_reserve_session(NULL, 1, reserve_cpu);
if (ret) goto cleanup_reserve;
}
/* save the current system wide pmu states */
ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
goto cleanup_reserve;
}
/* officially change to the alternate interrupt handler */
pfm_alt_intr_handler = hdl;
spin_unlock(&pfm_alt_install_check);
return 0;
cleanup_reserve:
for_each_online_cpu(i) {
/* don't unreserve more than we reserved */
if (i >= reserve_cpu) break;
pfm_unreserve_session(NULL, 1, i);
}
spin_unlock(&pfm_alt_install_check);
return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
int i;
int ret;
if (hdl == NULL) return -EINVAL;
/* cannot remove someone else's handler! */
if (pfm_alt_intr_handler != hdl) return -EINVAL;
/* one at a time in the install or remove, just fail the others */
if (!spin_trylock(&pfm_alt_install_check)) {
return -EBUSY;
}
pfm_alt_intr_handler = NULL;
ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
}
for_each_online_cpu(i) {
pfm_unreserve_session(NULL, 1, i);
}
spin_unlock(&pfm_alt_install_check);
return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
/*
* perfmon initialization routine, called from the initcall() table
*/
static int init_pfm_fs(void);
static int __init
pfm_probe_pmu(void)
{
pmu_config_t **p;
int family;
family = local_cpu_data->family;
p = pmu_confs;
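/*
 * walk the table of known PMU configurations: an entry matches when its
 * probe() hook returns 0 or, when no probe is provided, when pmu_family
 * equals the local CPU family (0xff acts as a wildcard).
 */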
while(*p) {
if ((*p)->probe) {
if ((*p)->probe() == 0) goto found;
} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
goto found;
}
p++;
}
return -1;
found:
pmu_conf = *p;
return 0;
}
static const struct file_operations pfm_proc_fops = {
.open = pfm_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
int __init
pfm_init(void)
{
unsigned int n, n_counters, i;
printk("perfmon: version %u.%u IRQ %u\n",
PFM_VERSION_MAJ,
PFM_VERSION_MIN,
IA64_PERFMON_VECTOR);
if (pfm_probe_pmu()) {
printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
local_cpu_data->family);
return -ENODEV;
}
/*
* compute the number of implemented PMD/PMC from the
* description tables
*/
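/* impl_pmcs/impl_pmds are bitmaps of 64-bit words: register i maps to word i>>6, bit i&63 */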
n = 0;
for (i=0; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_IMPL(i) == 0) continue;
pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
n++;
}
pmu_conf->num_pmcs = n;
n = 0; n_counters = 0;
for (i=0; PMD_IS_LAST(i) == 0; i++) {
if (PMD_IS_IMPL(i) == 0) continue;
pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
n++;
if (PMD_IS_COUNTING(i)) n_counters++;
}
pmu_conf->num_pmds = n;
pmu_conf->num_counters = n_counters;
/*
* sanity checks on the number of debug registers
*/
if (pmu_conf->use_rr_dbregs) {
if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
pmu_conf = NULL;
return -1;
}
if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
pmu_conf = NULL;
return -1;
}
}
printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
pmu_conf->pmu_name,
pmu_conf->num_pmcs,
pmu_conf->num_pmds,
pmu_conf->num_counters,
ffz(pmu_conf->ovfl_val));
/* sanity check */
if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
pmu_conf = NULL;
return -1;
}
/*
* create /proc/perfmon (mostly for debugging purposes)
*/
perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
if (perfmon_dir == NULL) {
printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
pmu_conf = NULL;
return -1;
}
/*
* create /proc/sys/kernel/perfmon (for debugging purposes)
*/
pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
/*
* initialize all our spinlocks
*/
spin_lock_init(&pfm_sessions.pfs_lock);
spin_lock_init(&pfm_buffer_fmt_lock);
init_pfm_fs();
for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
return 0;
}
__initcall(pfm_init);
/*
* this function is called before pfm_init()
*/
void
pfm_init_percpu (void)
{
static int first_time=1;
/*
* make sure no measurement is active
* (may inherit programmed PMCs from EFI).
*/
pfm_clear_psr_pp();
pfm_clear_psr_up();
/*
* we run with the PMU not frozen at all times
*/
pfm_unfreeze_pmu();
if (first_time) {
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
first_time=0;
}
ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
ia64_srlz_d();
}
/*
* used for debug purposes only
*/
void
dump_pmu_state(const char *from)
{
struct task_struct *task;
struct pt_regs *regs;
pfm_context_t *ctx;
unsigned long psr, dcr, info, flags;
int i, this_cpu;
local_irq_save(flags);
this_cpu = smp_processor_id();
regs = task_pt_regs(current);
info = PFM_CPUINFO_GET();
dcr = ia64_getreg(_IA64_REG_CR_DCR);
if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
local_irq_restore(flags);
return;
}
printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
this_cpu,
from,
task_pid_nr(current),
regs->cr_iip,
current->comm);
task = GET_PMU_OWNER();
ctx = GET_PMU_CTX();
printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
psr = pfm_get_psr();
printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
this_cpu,
ia64_get_pmc(0),
psr & IA64_PSR_PP ? 1 : 0,
psr & IA64_PSR_UP ? 1 : 0,
dcr & IA64_DCR_PP ? 1 : 0,
info,
ia64_psr(regs)->up,
ia64_psr(regs)->pp);
ia64_psr(regs)->up = 0;
ia64_psr(regs)->pp = 0;
for (i=1; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_IMPL(i) == 0) continue;
printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
}
for (i=1; PMD_IS_LAST(i) == 0; i++) {
if (PMD_IS_IMPL(i) == 0) continue;
printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
}
if (ctx) {
printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
this_cpu,
ctx->ctx_state,
ctx->ctx_smpl_vaddr,
ctx->ctx_smpl_hdr,
ctx->ctx_msgq_head,
ctx->ctx_msgq_tail,
ctx->ctx_saved_psr_up);
}
local_irq_restore(flags);
}
/*
* called from process.c:copy_thread(). task is new child.
*/
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
struct thread_struct *thread;
DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
thread = &task->thread;
/*
* cut links inherited from parent (current)
*/
thread->pfm_context = NULL;
PFM_SET_WORK_PENDING(task, 0);
/*
* the psr bits are already set properly in copy_threads()
*/
}
#else /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
return -ENOSYS;
}
#endif /* CONFIG_PERFMON */
| leftrepo/Owl-Kernel-for-Xperia-Sola | arch/ia64/kernel/perfmon.c | C | gpl-2.0 | 172,407 |
/*
* ISDB subtitle decoding
* Copyright (c) 2013 0p1pp1
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <iconv.h>
#include <string.h>
#include "ass_mp.h"
#include "mp_global.h"
#include "mp_msg.h"
#include "libmpdemux/stheader.h"
#include "sub/isdbsubdec.h"
#include "libavutil/avstring.h"
#include "libavutil/crc.h"
#include "libavutil/intreadwrite.h"
#undef AV_NOPTS_VALUE
#define AV_NOPTS_VALUE ((signed)INT64_C(0x8000000000000000))
#define ISDBSUB_DATA_ID 0x80
#define ISDBSUB_DU_TYPE_TXT 0x20
#define ISDBSUB_UNIT_SEP 0x1F
#define ISDBSUB_MGMNT_TIMEOUT (180 * 1000)
#define ISDBSUB_NO_DGID -1
#define ISDBSUB_MAX_LANG 2 /* ARIB TR-B14/B15 */
#define IS_HORIZONTAL_LAYOUT(format) \
((format) == ISDBSUB_FMT_960H || (format) == ISDBSUB_FMT_720H)
#define LAYOUT_GET_WIDTH(format) \
(((format) == ISDBSUB_FMT_960H || (format) == ISDBSUB_FMT_960V) ? 960 : 720)
#define LAYOUT_GET_HEIGHT(format) \
(((format) == ISDBSUB_FMT_960H || (format) == ISDBSUB_FMT_960V) ? 540 : 480)
#define MPEGTS_MAX_PTS (((2LL<<33) + 45)/90)
#define RGBA(r,g,b,a) (((unsigned)(255 - (a)) << 24) | ((b) << 16) | ((g) << 8) | (r))
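/*
 * pack a color as ASS-style 0xAABBGGRR: the alpha byte is stored inverted
 * (0 = opaque, 0xFF = fully transparent) to match the \c / \a tag values
 * emitted later in set_color().
 */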
static const AVCRC *Crc_table;
typedef uint32_t rgba;
static rgba Default_clut[128] =
{
//0-7
RGBA(0,0,0,255), RGBA(255,0,0,255), RGBA(0,255,0,255), RGBA(255,255,0,255),
RGBA(0,0,255,255), RGBA(255,0,255,255), RGBA(0,255,255,255), RGBA(255,255,255,255),
//8-15
RGBA(0,0,0,0), RGBA(170,0,0,255), RGBA(0,170,0,255), RGBA(170,170,0,255),
RGBA(0,0,170,255), RGBA(170,0,170,255), RGBA(0,170,170,255), RGBA(170,170,170,255),
//16-23
RGBA(0,0,85,255), RGBA(0,85,0,255), RGBA(0,85,85,255), RGBA(0,85,170,255),
RGBA(0,85,255,255), RGBA(0,170,85,255), RGBA(0,170,255,255), RGBA(0,255,85,255),
//24-31
RGBA(0,255,170,255), RGBA(85,0,0,255), RGBA(85,0,85,255), RGBA(85,0,170,255),
RGBA(85,0,255,255), RGBA(85,85,0,255), RGBA(85,85,85,255), RGBA(85,85,170,255),
//32-39
RGBA(85,85,255,255), RGBA(85,170,0,255), RGBA(85,170,85,255), RGBA(85,170,170,255),
RGBA(85,170,255,255), RGBA(85,255,0,255), RGBA(85,255,85,255), RGBA(85,255,170,255),
//40-47
RGBA(85,255,255,255), RGBA(170,0,85,255), RGBA(170,0,255,255), RGBA(170,85,0,255),
RGBA(170,85,85,255), RGBA(170,85,170,255), RGBA(170,85,255,255), RGBA(170,170,85,255),
//48-55
RGBA(170,170,255,255), RGBA(170,255,0,255), RGBA(170,255,85,255), RGBA(170,255,170,255),
RGBA(170,255,255,255), RGBA(255,0,85,255), RGBA(255,0,170,255), RGBA(255,85,0,255),
//56-63
RGBA(255,85,85,255), RGBA(255,85,170,255), RGBA(255,85,255,255), RGBA(255,170,0,255),
RGBA(255,170,85,255), RGBA(255,170,170,255), RGBA(255,170,255,255), RGBA(255,255,85,255),
//64
RGBA(255,255,170,255),
// 65-127 are calculated later.
};
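/*
 * Default macro bodies (invoked through the MACRO control code): each entry
 * is a sequence of ISO 2022 ESC designations and locking-shift invocations
 * that re-designates the G0-G3 character sets to a standard combination.
 */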
static const uint8_t * const Default_macro[16] =
{
"\x1B\x24\x42\x1B\x29\x4A\x1B\x2A\x30\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x24\x42\x1B\x29\x31\x1B\x2A\x30\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x24\x42\x1B\x29\x20\x41\x1B\x2A\x30\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x32\x1B\x29\x34\x1B\x2A\x35\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x32\x1B\x29\x33\x1B\x2A\x35\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x32\x1B\x29\x20\x41\x1B\x2A\x35\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x20\x41\x1B\x29\x20\x42\x1B\x2A\x20\x43\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x20\x44\x1B\x29\x20\x45\x1B\x2A\x20\x46\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x20\x47\x1B\x29\x20\x48\x1B\x2A\x20\x49\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x20\x4A\x1B\x29\x20\x4B\x1B\x2A\x20\x4C\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x20\x4D\x1B\x29\x20\x4E\x1B\x2A\x20\x4F\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x24\x42\x1B\x29\x20\x42\x1B\x2A\x30\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x24\x42\x1B\x29\x20\x43\x1B\x2A\x30\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x24\x42\x1B\x29\x20\x44\x1B\x2A\x30\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x31\x1B\x29\x30\x1B\x2A\x4A\x1B\x2B\x20\x70\x0F\x1B\x7D",
"\x1B\x28\x4A\x1B\x29\x32\x1B\x2A\x20\x41\x1B\x2B\x20\x70\x0F\x1B\x7D"
};
struct b24str_state
{
int gl; /* index of the group invoked to GL */
int gr; /* index of the group invoked to GR */
int ss; /* flag if in SS2 or SS3. 2:SS2, 3:SS3 */
struct group
{
unsigned char mb; /* how many bytes one character consists of. */
// code for character sets
#define CODE_ASCII ('\x40')
#define CODE_ASCII2 ('\x4A')
#define CODE_JISX0208 ('\x42')
#define CODE_JISX0213_1 ('\x51')
#define CODE_JISX0213_2 ('\x50')
#define CODE_JISX0201_KATA ('\x49')
#define CODE_MOSAIC_C ('\x34')
#define CODE_MOSAIC_D ('\x35')
#define CODE_EXT ('\x3B')
#define CODE_X_HIRA ('\x30')
#define CODE_X_HIRA_P ('\x37')
#define CODE_X_KATA ('\x31')
#define CODE_X_KATA_P ('\x38')
#define CODE_X_DRCS_MB ('\x40')
#define CODE_X_DRCS_MIN ('\x41')
#define CODE_X_DRCS_MAX ('\x4F')
#define CODE_X_MACRO ('\x70')
unsigned char code; /* character set that this group designates */
} g[4];
};
struct isdbsub_layout
{
enum isdbsub_format {
ISDBSUB_FMT_960H = 0x08,
ISDBSUB_FMT_960V,
ISDBSUB_FMT_720H,
ISDBSUB_FMT_720V,
} format;
int is_profile_c; // profile C: "1seg". see ARIB TR-B14 3-4
// clipping area.
struct disp_area {
int x, y;
int w, h;
} display_area;
// for tracking pen position
int font_size; // valid values: {16, 20, 24, 30, 36} (TR-B14/B15)
struct fscale { // in [percent]
int fscx, fscy;
} font_scale; // 1/2x1/2, 1/2*1, 1*1, 1*2, 2*1, 2*2
struct spacing {
int col, row;
} cell_spacing;
// internal use for tracking pen position/line break.
// Although texts are laid out by libass,
// we need to track pen position by ourselves
// in order to calculate line breaking positions and character/line spacing.
int prev_char_sep;
int prev_line_desc;
int prev_line_bottom; // offset from display_area.y, not from top-margin.
int line_desc;
int linesep_upper;
int line_height;
int line_width; // pen position x
int prev_break_idx; // ctx->text.buf[prev_break_idx] holds the previous "\N"
int shift_baseline; // special case where baseline should be shifted down ?
int block_offset_h; // text[0].hspacing / 2
int block_offset_v; // line[0].lspacing_upper
int repeat_count; // -1: none, 0: until EOL, 1...i: repeat the next char i times
int in_combining; // bool
struct scroll_param {
enum {SCROLL_DIR_NONE, SCROLL_DIR_COLUMN, SCROLL_DIR_ROW} direction;
int rollout; // bool
int speed; // in pixel/sec
} scroll;
};
typedef struct isdbsub_state {
int auto_display; // bool. forced to be displayed w/o user interaction
int rollup_mode; // bool
uint8_t need_init; // bool
uint8_t clut_high_idx; // color = default_clut[high_idx << 8 | low_idx]
uint32_t fg_color;
uint32_t bg_color;
uint32_t mat_color;
struct isdbsub_layout layout_state;
struct b24str_state text_state;
} ISDBSubState;
typedef struct ISDBSubContext {
int last_mngmnt_id; // "data group id" of the last subtitle management data
int64_t last_mngmnt_pts; // time when the last mgmnt data was received
int64_t pts;
int64_t duration;
enum {
ISDBSUB_TMD_FREE,
ISDBSUB_TMD_REALTIME,
ISDBSUB_TMD_OFFSET
} timing_mode;
struct timecode {
int hour, min, sec, ms;
} offset, start; // currently unused.
ISDBSubState default_states[ISDBSUB_MAX_LANG];
int lang_tag; // language tag of the currently decoding subtitle text data
ISDBSubState current_state; //modified default_state[lang_tag]
iconv_t iconv;
struct my_str {
char *buf;
size_t len;
size_t used;
size_t txt_tail; // tail of the text, excluding trailing control sequences.
} text;
// for output
int is_style_inited;
char *script_info;
char *events;
} ISDBSubContext;
struct margins {
int l, r, v;
};
static void memdump(int level, const char *buf, int buf_size)
{
int i;
for (i=0; i < buf_size; i++) {
mp_msg(MSGT_DECSUB, level, "%02hhx ", buf[i]);
if (i % 16 == 15)
mp_msg(MSGT_DECSUB, level, "\n");
}
if (i % 16)
mp_msg(MSGT_DECSUB, level, "\n");
}
static char *my_astrconcat(char *a, char *b)
{
char *ret;
if (a == NULL)
return b ? strdup(b) : NULL;
if (b == NULL)
return a;
ret = av_asprintf("%s%s", a, b);
free(a);
return ret;
}
static char *pts_to_str(int64_t t, char *buf)
{
int ms10, sec, min, hour;
if (t == AV_NOPTS_VALUE)
return "NOPTS";
ms10 = (t % 1000) / 10;
t /= 1000;
sec = t % 60;
t /= 60;
min = t % 60;
t /= 60;
hour = t;
snprintf(buf, 13, "%02d:%02d:%02d.%02d", hour, min, sec, ms10);
return buf;
}
// NOTE: state->layout_state.format must be set before calling this func.
static void init_layout(struct isdbsub_layout *l)
{
l->font_size = 36;
l->display_area.x = 0;
l->display_area.y = 0;
switch (l->format) {
case ISDBSUB_FMT_960H:
l->display_area.w = 960;
l->display_area.h = 540;
l->cell_spacing.col = 4;
l->cell_spacing.row = 24;
break;
case ISDBSUB_FMT_960V:
l->display_area.w = 960;
l->display_area.h = 540;
l->cell_spacing.col = 12;
l->cell_spacing.row = 24;
break;
case ISDBSUB_FMT_720H:
l->display_area.w = 720;
l->display_area.h = 480;
l->cell_spacing.col = 4;
l->cell_spacing.row = 16;
break;
case ISDBSUB_FMT_720V:
l->display_area.w = 720;
l->display_area.h = 480;
l->cell_spacing.col = 8;
l->cell_spacing.row = 24;
break;
}
// profile C uses a fixed format,
// which does not define specific position or size and
// just requires texts to be displayed in either 16x3 or 12x4 characters.
// we use ISDBSUB_FMT_960H for the base format.
if (l->is_profile_c) {
l->display_area.x = 160;
l->display_area.y = 360;
l->display_area.w = 640;
l->display_area.h = 180;
}
}
static void reset_state(ISDBSubState *state)
{
struct isdbsub_layout *l = &state->layout_state;
struct b24str_state *s = &state->text_state;
state->need_init = 1;
state->clut_high_idx = 0;
state->fg_color = Default_clut[7]; // white
state->bg_color = Default_clut[8]; // translucent
state->mat_color = Default_clut[8]; // FIXME: should be set in init_layout()?
l->block_offset_h = l->cell_spacing.col / 2;
l->block_offset_v = l->cell_spacing.row / 2;
l->font_scale.fscx = 100;
l->font_scale.fscy = 100;
l->prev_char_sep = 0;
l->prev_line_desc = 0;
l->prev_line_bottom = 0; // 0 means block_offset & pen_pos. are not defined yet.
l->line_height = 0;
l->line_width = 0;
l->line_desc = 0;
l->linesep_upper = 0;
l->prev_break_idx = 0;
l->shift_baseline = 0;
l->repeat_count = -1;
l->in_combining = 0;
l->scroll.direction = SCROLL_DIR_NONE;
s->gl = 0; // G0
s->gr = 2; // G2
s->ss = 0; // not in SS{2,3}
s->g[0].mb = 2;
s->g[0].code = CODE_JISX0208;
s->g[1].mb = 1;
s->g[1].code = CODE_ASCII;
s->g[2].mb = 1;
s->g[2].code = CODE_X_HIRA;
s->g[3].mb = 1;
s->g[3].code = CODE_X_MACRO;
// profile C uses different default.
if (l->is_profile_c) {
s->g[3].mb = 1;
s->g[3].code = CODE_X_DRCS_MIN;
s->gl = 3;
s->gr = 0;
}
}
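/*
 * translate the ISDB display area (plus the block offsets) into ASS
 * MarginL/MarginR/MarginV values; vertical layouts use the rotated
 * coordinate system also used by move_penpos().
 */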
static void get_margins(ISDBSubContext *ctx, struct margins *m)
{
struct isdbsub_layout *lstate = &ctx->current_state.layout_state;
if (IS_HORIZONTAL_LAYOUT(lstate->format)) {
m->l = lstate->display_area.x + lstate->block_offset_h;
m->r = LAYOUT_GET_WIDTH(lstate->format)
- (lstate->display_area.x + lstate->display_area.w);
m->v = lstate->display_area.y + lstate->block_offset_v;
if (lstate->is_profile_c)
m->v = 0;
} else {
m->l = lstate->display_area.y + lstate->block_offset_v;
m->r = LAYOUT_GET_HEIGHT(lstate->format)
- (lstate->display_area.y + lstate->display_area.h);
m->v = LAYOUT_GET_WIDTH(lstate->format)
- (lstate->display_area.x + lstate->display_area.w)
+ lstate->block_offset_h;
}
}
static void clear_text(ISDBSubContext *ctx)
{
ctx->text.used = 0;
ctx->text.txt_tail = 0;
}
static void fixup_linesep(ISDBSubContext *ctx);
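/*
 * flush the accumulated text as a single ASS "Dialogue:" line: pick the
 * style from the layout, compute margins, scroll/roll-up effect and the
 * clipping rectangle, then advance ctx->pts by the event duration.
 * Control sequences already queued past txt_tail are kept for the next event.
 */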
static void append_event(ISDBSubContext *ctx)
{
ISDBSubState *state = &ctx->current_state;
struct isdbsub_layout *l = &state->layout_state;
char start[16], end[16];
struct margins m;
char effect[36], clipping[64];
char *dialog;
char c0 = 0;
if (ctx->pts == AV_NOPTS_VALUE || ctx->text.buf == NULL || !ctx->text.used)
return;
fixup_linesep(ctx);
mp_msg(MSGT_DECSUB, MSGL_DBG2, "append_event: %lu\n", ctx->text.used);
#if 0
if (state->rollup_mode)
ctx->duration = l->line_count * 5 * 1000; // FIXME
else if (l->scroll.direction != SCROLL_DIR_NONE && l->scroll.speed > 0)
ctx->duration = (display_area.w + (rollout ? text_box.w : 0)) * 1000 / scroll.speed
* (rollout ? 0 : 5000);
#endif
if (ctx->duration <= 0)
ctx->duration = 5000;
else if (ctx->duration < 100)
ctx->duration = 100;
pts_to_str(ctx->pts, start);
pts_to_str(ctx->pts + ctx->duration, end);
effect[0] = '\0';
get_margins(ctx, &m);
if (state->rollup_mode) {
if (IS_HORIZONTAL_LAYOUT(ctx->current_state.layout_state.format))
av_strlcatf(effect, sizeof(effect), "Scroll up;;;");
else
av_strlcatf(effect, sizeof(effect), "Banner;;;;"); // LtoR
} else if ((IS_HORIZONTAL_LAYOUT(l->format) &&
l->scroll.direction == SCROLL_DIR_COLUMN) ||
(!IS_HORIZONTAL_LAYOUT(l->format) &&
l->scroll.direction == SCROLL_DIR_ROW))
av_strlcatf(effect, sizeof(effect), "Banner;%d;%d;0;%d",
1000 / l->scroll.speed,
(l->scroll.direction == SCROLL_DIR_ROW),
!l->scroll.rollout);
else if (l->scroll.direction != SCROLL_DIR_NONE)
av_strlcatf(effect, sizeof(effect), "Scroll up;%d;%d;%d;%d",
IS_HORIZONTAL_LAYOUT(l->format) ? m.v : m.l,
l->display_area.y + l->display_area.h,
1000 / l->scroll.speed,
!l->scroll.rollout);
clipping[0] = '\0';
av_strlcatf(clipping, sizeof(clipping), "{\\clip(%d,%d,%d,%d)}",
l->display_area.x, l->display_area.y,
l->display_area.x + l->display_area.w,
l->display_area.y + l->display_area.h);
// control sequences for the next event may be appended.
if (ctx->text.txt_tail != ctx->text.used) {
c0 = ctx->text.buf[ctx->text.txt_tail];
ctx->text.buf[ctx->text.txt_tail] = '\0';
}
dialog = av_asprintf("Dialogue: %s,%s,%s,%d,%d,%d,%s,%s%s\n",
l->is_profile_c ? "prof_c" :
(IS_HORIZONTAL_LAYOUT(l->format) ? "hstyle" : "vstyle"),
pts_to_str(ctx->pts, start), pts_to_str(ctx->pts + ctx->duration, end),
m.l, m.r, m.v, effect, clipping, ctx->text.buf);
if (ctx->events) {
ctx->events = my_astrconcat(ctx->events, dialog);
free(dialog);
} else
ctx->events = dialog;
ctx->pts += ctx->duration;
ctx->duration = 0;
state->need_init = 1;
if (ctx->text.txt_tail != ctx->text.used) {
ctx->text.buf[ctx->text.txt_tail] = c0;
memmove(ctx->text.buf, ctx->text.buf + ctx->text.txt_tail,
ctx->text.used - ctx->text.txt_tail);
ctx->text.used -= ctx->text.txt_tail;
ctx->text.txt_tail = 0;
}
}
static void reserve_buf(ISDBSubContext *ctx, size_t len)
{
size_t blen;
if (ctx->text.len >= ctx->text.used + len)
return;
blen = ((ctx->text.used + len + 127) >> 7) << 7;
ctx->text.buf = av_realloc(ctx->text.buf, blen);
if (!ctx->text.buf) {
mp_msg(MSGT_DECSUB, MSGL_WARN, "out of memory for ctx->text.\n");
return;
}
ctx->text.len = blen;
mp_msg(MSGT_DECSUB, MSGL_V, "expanded ctx->text(%lu)\n", blen);
}
static int append_str(ISDBSubContext *ctx, const char *str)
{
size_t tlen = strlen(str);
reserve_buf(ctx, tlen + 1); // +1 for terminating '\0'
memcpy(ctx->text.buf + ctx->text.used, str, tlen);
ctx->text.used += tlen;
ctx->text.buf[ctx->text.used] = '\0';
return 0;
}
static int prepend_str(ISDBSubContext *ctx, const char *str)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
size_t tlen = strlen(str);
reserve_buf(ctx, tlen + 1); // +1 for terminating '\0'
memmove(ctx->text.buf + tlen, ctx->text.buf, ctx->text.used);
memcpy(ctx->text.buf, str, tlen);
if (ls->prev_break_idx)
ls->prev_break_idx += tlen;
ctx->text.used += tlen;
ctx->text.buf[ctx->text.used] = '\0';
return 0;
}
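/*
 * emit everything accumulated so far into *outbufp: the [Script Info]
 * section (if pending), the [V4+ Styles] section exactly once, the [Events]
 * header when needed, and all queued Dialogue lines. *osize is set to the
 * resulting string length.
 */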
static void do_output(ISDBSubContext *ctx, uint8_t **outbufp, int *osize)
{
int header = 0;
if (!ctx->script_info && !ctx->events)
return;
mp_msg(MSGT_DECSUB, MSGL_DBG2, "making output.\n");
if (ctx->script_info) {
*outbufp = my_astrconcat(*outbufp, ctx->script_info);
free(ctx->script_info);
ctx->script_info = NULL;
header = 1;
}
if (!ctx->is_style_inited) { // once
#define STYLE_PARAMS_FONT \
":lang=ja:charset=3000:spacing=dual"
#define STYLE_PARAMS_COMMON \
"&H00FFFFFF, &HFF000000, &HFF000000, &HFF000000, 1, 0, 0, 1\n"
*outbufp = my_astrconcat(*outbufp, "[V4+ Styles]\n"
"Format: Name, Fontname, Fontsize, Alignment, "
"PrimaryColour, OutlineColour, BackColour, ClippingColour, "
"BorderStyle, Outline, Shadow, treat_fontname_as_pattern\n"
"Style: vstyle, @" STYLE_PARAMS_FONT ", 36, 9, " STYLE_PARAMS_COMMON
"Style: hstyle, " STYLE_PARAMS_FONT ", 36, 7, " STYLE_PARAMS_COMMON
"Style: prof_c, " STYLE_PARAMS_FONT ", 36, 2, " STYLE_PARAMS_COMMON);
#undef STYLE_PARAMS_FONT
#undef STYLE_PARAMS_COMMON
ctx->is_style_inited = 1;
header = 1;
}
if (ctx->events || header) {
if (header)
*outbufp = my_astrconcat(*outbufp, "\n[Events]\n"
"Format: Style, Start, End, MarginL, MarginR, MarginV, "
"Effect, Text\n");
*outbufp = my_astrconcat(*outbufp, ctx->events);
free(ctx->events);
ctx->events = NULL;
}
*osize = strlen(*outbufp);
mp_msg(MSGT_DECSUB, MSGL_V, "ass output:\n%s\n", *outbufp);
}
static void set_format(ISDBSubContext *ctx)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
clear_text(ctx);
reset_state(&ctx->current_state);
free(ctx->script_info);
ctx->script_info = av_asprintf("[Script Info]\nScript Type: v4.00+\n"
"PlayDepth: 24\nPlayResX: %d\nPlayResY: %d\nLanguage: ja\n"
"WrapStyle: 2\nStreamingMode: 1\n\n",
LAYOUT_GET_WIDTH(ls->format), LAYOUT_GET_HEIGHT(ls->format));
}
static void set_color(ISDBSubContext *ctx, int whatcolor, uint32_t color)
{
char fmt[32];
if (whatcolor < 1 || whatcolor > 5)
return;
fmt[0] = '\0';
av_strlcatf(fmt, sizeof(fmt), "{\\%dc&H%06X&\\%da&H%02X&}",
whatcolor, color & 0xffffff, whatcolor, color >> 24);
append_str(ctx, fmt);
mp_msg(MSGT_DECSUB, MSGL_DBG2, "(%d)-th color set to 0x%08X.\n",
whatcolor, color);
}
static void set_font_scale(ISDBSubContext *ctx)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
char fmt[24];
fmt[0] = '\0';
av_strlcatf(fmt, sizeof(fmt), "{\\fscx%d\\fscy%d}",
ls->font_scale.fscx, ls->font_scale.fscy);
append_str(ctx, fmt);
mp_msg(MSGT_DECSUB, MSGL_DBG2, "font scale: (%d, %d)\n",
ls->font_scale.fscx, ls->font_scale.fscy);
}
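/*
 * parse one or two decimal CSI parameters starting at q; returns a pointer
 * to the byte that stopped the scan (the 0x20/0x3B separator or the final
 * byte), or NULL when the expected separator is missing.
 */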
static const uint8_t *get_csi_params(const uint8_t *q,
unsigned int *p1, unsigned int *p2)
{
if (!p1)
return NULL;
*p1 = 0;
for (; *q >= 0x30 && *q <= 0x39; q++) {
*p1 *= 10;
*p1 += *q - 0x30;
}
if (!p2)
return q;
*p2 = 0;
if (*q != 0x20 && *q != 0x3B)
return NULL;
for (q++; *q >= 0x30 && *q <= 0x39; q++) {
*p2 *= 10;
*p2 += *q - 0x30;
}
return q;
}
// called at the start of sub-text or after each SWF/SDP/SDF occurrence.
// Define some style overrides at the start of Dialog line outputs,
// which are not defined in [style] nor ASS default.
// Note that SDP/SDF don't reset the other style parameters.
// Note2: multiple calls of this func safely overwrite the previous defs.
static void setup_line_head(ISDBSubContext *ctx)
{
ISDBSubState *state = &ctx->current_state;
struct isdbsub_layout *ls = &state->layout_state;
char item[256];
int cscale;
if (IS_HORIZONTAL_LAYOUT(ls->format))
cscale = ls->font_scale.fscx;
else
cscale = ls->font_scale.fscy;
item[0] = '\0';
// lsp will be corrected later.
// Note: ASS scales \fsp by the font scale of the preceding char.
av_strlcatf(item, sizeof(item), "{\\lsp0\\fsp%d}", ls->cell_spacing.col);
ls->prev_char_sep = ls->cell_spacing.col * cscale / 100;
av_strlcatf(item, sizeof(item), "{\\fs%d}", ls->font_size);
if (ls->font_scale.fscx != 100)
av_strlcatf(item, sizeof(item), "{\\fscx%d}", ls->font_scale.fscx);
if (ls->font_scale.fscy != 100)
av_strlcatf(item, sizeof(item), "{\\fscy%d}", ls->font_scale.fscy);
if (state->fg_color != 0x00FFFFFF) {
if ((state->fg_color & 0xFFFFFF) != 0xFFFFFF)
av_strlcatf(item, sizeof(item), "{\\1c&H%06X&}",
state->fg_color & 0xFFFFFF);
if ((state->fg_color >> 24) != 0x00)
av_strlcatf(item, sizeof(item), "{\\1a&H%02X&}",
state->fg_color >> 24);
}
if (state->bg_color != 0xFF000000) {
if ((state->bg_color & 0xFFFFFF) != 0)
av_strlcatf(item, sizeof(item), "{\\4c&H%06X&}",
state->bg_color & 0xFFFFFF);
if ((state->bg_color >> 24) != 0xFF)
av_strlcatf(item, sizeof(item), "{\\4a&H%02X&}",
state->bg_color >> 24);
}
if (state->mat_color != 0xFF000000) {
if ((state->mat_color & 0xFFFFFF) != 0)
av_strlcatf(item, sizeof(item), "{\\5c&H%06X&}",
state->mat_color & 0xFFFFFF);
if ((state->mat_color >> 24) != 0xFF)
av_strlcatf(item, sizeof(item), "{\\5a&H%02X&}",
state->mat_color >> 24);
}
prepend_str(ctx, item);
state->need_init = 0;
}
static void advance_by_pixels(ISDBSubContext *ctx, int csp)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
int cscale;
int csep_orig;
char tmp[32];
if (IS_HORIZONTAL_LAYOUT(ls->format))
cscale = ls->font_scale.fscx;
else
cscale = ls->font_scale.fscy;
csep_orig = ls->cell_spacing.col * cscale / 100;
tmp[0] = '\0';
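/*
 * emit an invisible U+200B (zero width space) wrapped in a temporary \fsp
 * override; ASS scales \fsp by the current font scale, so the requested
 * pixel advance is pre-divided by the scale (csp * 100 / cscale).
 */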
av_strlcatf(tmp, sizeof(tmp), "{\\fsp%d}\xe2\x80\x8b{\\fsp%d}",
csp * 100 / cscale, ls->cell_spacing.col);
append_str(ctx, tmp);
ls->line_width += csp;
ls->prev_char_sep = csep_orig;
mp_msg(MSGT_DECSUB, MSGL_DBG2, "advanced %d pixel using fsp.\n", csp);
}
static void do_line_break(ISDBSubContext *ctx);
// move pen position to (col, row) relative to display area's top left.
// Note 1: in vertical layout, coordinates are rotated 90 deg.
// around the display area's top right corner.
// Note 2: the cell includes line/char spacings on both sides.
static void move_penpos(ISDBSubContext *ctx, int col, int row)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
int csp_l, col_ofs;
int cur_bottom;
int cell_height;
int cell_desc;
mp_msg(MSGT_DECSUB, MSGL_DBG2, "move pen pos. to (%d, %d).\n", col, row);
if (IS_HORIZONTAL_LAYOUT(ls->format)) {
// convert pen pos. to upper left of the cell.
cell_height = (ls->font_size + ls->cell_spacing.row)
* ls->font_scale.fscy / 100;
if (ls->font_scale.fscy == 200)
cell_desc = ls->cell_spacing.row / 2;
else
cell_desc = ls->cell_spacing.row * ls->font_scale.fscy / 200;
row -= cell_height;
csp_l = ls->cell_spacing.col * ls->font_scale.fscx / 200;
if (ls->line_width == 0 && ls->prev_line_bottom == 0)
ls->block_offset_h = csp_l;
col_ofs = ls->block_offset_h;
} else {
cell_height = (ls->font_size + ls->cell_spacing.row)
* ls->font_scale.fscx / 100;
cell_desc = cell_height / 2;
row -= cell_height - cell_desc;
csp_l = ls->cell_spacing.col * ls->font_scale.fscy / 200;
if (ls->line_width == 0 && ls->prev_line_bottom == 0)
ls->block_offset_v = csp_l;
col_ofs = ls->block_offset_v;
}
cur_bottom = ls->prev_line_bottom +
ls->linesep_upper + ls->line_height + ls->line_desc;
// allow adjusting +- cell_height/2 at maximum
// to align to the current line bottom.
if (row + cell_height / 2 > cur_bottom) {
// (col, row) is below the current line bottom
do_line_break(ctx); // ls->prev_line_bottom == cur_bottom
ls->linesep_upper = row + cell_height - cell_desc - ls->prev_line_bottom;
ls->line_height = 0;
advance_by_pixels(ctx, col + csp_l - col_ofs);
} else if (row + cell_height * 3 / 2 > cur_bottom &&
col + csp_l > col_ofs + ls->line_width) {
// append to the current line...
advance_by_pixels(ctx, col + csp_l - (col_ofs + ls->line_width));
} else {
mp_msg(MSGT_DECSUB, MSGL_INFO, "backward move not supported.\n");
return;
}
}
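/*
 * convert an APS character cell position (p1 = row, p2 = column) into
 * pixel pen coordinates and delegate to move_penpos().
 */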
static void set_position(ISDBSubContext *ctx, unsigned int p1, unsigned int p2)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
int cw, ch;
int col, row;
if (IS_HORIZONTAL_LAYOUT(ls->format)) {
cw = (ls->font_size + ls->cell_spacing.col) * ls->font_scale.fscx / 100;
ch = (ls->font_size + ls->cell_spacing.row) * ls->font_scale.fscy / 100;
// pen position is at bottom left
col = p2 * cw;
row = p1 * ch + ch;
} else {
cw = (ls->font_size + ls->cell_spacing.col) * ls->font_scale.fscy / 100;
ch = (ls->font_size + ls->cell_spacing.row) * ls->font_scale.fscx / 100;
// pen position is at upper center,
// but in -90deg rotated coordinates, it is at middle left.
col = p2 * cw;
row = p1 * ch + ch / 2;
}
move_penpos(ctx, col, row);
}
static void forward_position(ISDBSubContext *ctx, int rows, int cols);
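/*
 * interpret the C0/C1 control codes and CSI sequences (APF, APS, CS, COL,
 * SWF, SDF, SDP, SSM, SCR, ...) at the start of buf; returns a pointer just
 * past the consumed controls, or NULL when a sequence is truncated.
 */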
static const uint8_t *proc_ctl(ISDBSubContext *ctx,
const uint8_t *buf, int buf_size)
{
ISDBSubState *state = &ctx->current_state;
struct b24str_state *ts = &state->text_state;
struct isdbsub_layout *ls = &state->layout_state;
uint8_t code;
unsigned int p1, p2;
int i, mb;
const uint8_t *q;
while (buf_size > 0) {
code = buf[0];
if ((code & 0x60) != 0)
break;
buf++;
buf_size--;
p1 = p2 = 0;
switch (code) {
case 0x00: // NULL
break;
// cursor
case 0x09: // APF
// append a space
forward_position(ctx, 0, 1);
break;
case 0x16: // PAPF (0x40..0x7F)
if (buf_size < 1)
return NULL;
p1 = buf[0] & 0x3F;
buf++;
buf_size--;
forward_position(ctx, 0, p1);
break;
case 0x0D: // APR
forward_position(ctx, 1, 0);
mp_msg(MSGT_DECSUB, MSGL_DBG2, "explicit line break.\n");
break;
case 0x1C: // APS (0x40..0x7F, 0x40..0x7F)
if (buf_size < 2)
return NULL;
p1 = buf[0] & 0x3F;
p2 = buf[1] & 0x3F;
buf += 2;
buf_size -= 2;
mp_msg(MSGT_DECSUB, MSGL_DBG2, "aps: (%d, %d).\n", p1, p2);
set_position(ctx, p1, p2);
break;
case 0x0C: // CS
if (!state->rollup_mode) {
init_layout(ls);
reset_state(state);
clear_text(ctx);
mp_msg(MSGT_DECSUB, MSGL_DBG2, "screen cleared.\n");
}
break;
// iso2022 text state
case 0x0E: // LS1
ts->ss = 0;
ts->gl = 1;
break;
case 0x0F: // LS0
ts->ss = 0;
ts->gl = 0;
break;
case 0x19: // SS2
ts->ss = 2;
break;
case 0x1D: // SS3
ts->ss = 3;
break;
case 0x1B: // ESC(....)
if (buf_size < 1)
return NULL;
p1 = buf[0];
buf++;
buf_size--;
mb = 1;
switch (p1) {
case 0x6E: // LS2
case 0x6F: // LS3
ts->gl = 2 + (p1 & 0x01);
break;
case 0x7E: // LS1R
case 0x7D: // LS2R
case 0x7C: // LS3R
ts->gr = 0x7F - p1;
break;
// 0x28-0x2B (F) designate 1B set to G0-G3
// 0x24 [0x29-2B] (F) designate 2B set to G0-G3
// 0x28-0x2B 0x20 (F) designate 1B DRCS/MACRO to G0-G3
// 0x24 0x28-0x2B 0x20 (F) designate 2B DRCS to G0-G3
case 0x24:
if (buf_size < 1)
return NULL;
mb = 2;
p1 = buf[0];
if (p1 >= 0x28 && p1 <= 0x2B) {
buf++;
buf_size--;
} else
p1 = 0x28;
// fall through
case 0x28:
case 0x29:
case 0x2A:
case 0x2B:
if (buf_size < 1)
return NULL;
p2 = buf[0];
buf++;
buf_size--;
if (p2 == 0x20) {
if (buf_size < 1)
return NULL;
p2 = buf[0];
buf++;
buf_size--;
}
ts->g[p1 - 0x28].mb = mb;
ts->g[p1 - 0x28].code = p2;
break;
default:
mp_msg(MSGT_DECSUB, MSGL_V,
"unknown escape sequence: 0x%02hhx\n", p1);
}
break;
// color
case 0x80: // BKF
case 0x81: // RDF
case 0x82: // GRF
case 0x83: // YLF
case 0x84: // BLF
case 0x85: // MGF
case 0x86: // CNF
case 0x87: // WHF
p1 = (state->clut_high_idx << 4) | (code & 0x0F);
state->fg_color = Default_clut[p1];
set_color(ctx, 1, state->fg_color);
break;
case 0x90: // COL(0x48..0x7F) or (0x20, 0x40..0x4F)
if (buf_size < 1)
return NULL;
p1 = buf[0];
buf++;
buf_size--;
if (p1 == 0x20) {
if (buf_size < 1)
return NULL;
state->clut_high_idx = (buf[0] & 0x0F);
buf++;
buf_size--;
break;
}
p2 = (state->clut_high_idx << 4) | (p1 & 0x0F);
if ((p1 & 0xF0) == 0x40) {
state->fg_color = Default_clut[p2];
set_color(ctx, 1, state->fg_color);
}
break;
// character size
case 0x88: // SSZ
ls->font_scale.fscx = 50;
ls->font_scale.fscy = 50;
set_font_scale(ctx);
break;
case 0x89: // MSZ
ls->font_scale.fscx = 50;
ls->font_scale.fscy = 100;
set_font_scale(ctx);
break;
case 0x8A: // NSZ
ls->font_scale.fscx = 100;
ls->font_scale.fscy = 100;
set_font_scale(ctx);
break;
case 0x8B: // SZX ({0x41,0x44,0x45})
if (buf_size < 1)
return NULL;
p1 = buf[0];
buf++;
buf_size--;
ls->font_scale.fscx = 100;
ls->font_scale.fscy = 100;
if ((p1 & 0xFB) == 0x41)
ls->font_scale.fscy = 200;
if ((p1 & 0xFE) == 0x44)
ls->font_scale.fscx = 200;
set_font_scale(ctx);
break;
case 0x98: // RPC (0x40..0x7F)
if (buf_size < 1)
return NULL;
p1 = buf[0] & 0x3F;
buf++;
buf_size--;
ls->repeat_count = p1;
mp_msg(MSGT_DECSUB, MSGL_DBG2, "repeat char %d times.\n", p1);
break;
case 0x99: // SPL
append_str(ctx, "{\\u0}");
break;
case 0x9A: // STL
append_str(ctx, "{\\u1}");
break;
case 0x9B: // CSI
for (i = 0; i < buf_size; i++)
if (buf[i] >= 0x40 && buf[i] <= 0x6F)
break;
if (i == buf_size) {
buf += buf_size;
buf_size = 0;
break;
}
switch (buf[i]) {
// SWF1 (0x30..0x39, ...., 0x20, 0x53)
// -SWF2 ({0x38,0x3F?}, 0x3B, 0x30..0x32, 0x3B, 0x30..0x39, ...., [0x3B, 0x30..0x39,...,] 0x20, 0x53)
case 0x53:
q = get_csi_params(buf, &p1, NULL);
if (!q || *q != 0x20 || p1 < 7 || p1 > 10)
break;
ls->format = ISDBSUB_FMT_960H + (p1 - 7);
set_format(ctx); // calls reset_state, thus state->need_init = 1
mp_msg(MSGT_DECSUB, MSGL_DBG2, "new format: %d\n", ls->format);
break;
// SDF (0x30..0x39,..., 0x3B, 0x30..0x39,..., 0x20, 0x56)
case 0x56:
q = get_csi_params(buf, &p1, &p2);
if (!q || *q != 0x20)
break;
ls->display_area.w = p1;
ls->display_area.h = p2;
state->need_init = 1;
mp_msg(MSGT_DECSUB, MSGL_DBG2,
"new disp. area size: (%u, %u)\n", p1, p2);
break;
// SDP (0x30..0x39,..., 0x3B, 0x30..0x39,..., 0x20, 0x5F)
case 0x5F:
q = get_csi_params(buf, &p1, &p2);
if (!q || *q != 0x20)
break;
ls->display_area.x = p1;
ls->display_area.y = p2;
state->need_init = 1;
mp_msg(MSGT_DECSUB, MSGL_DBG2,
"new disp. area pos: (%u, %u)\n", p1, p2);
break;
// RCS (0x30..0x39,...., 0x20, 0x6E)
case 0x6E:
q = get_csi_params(buf, &p1, NULL);
if (!q || *q != 0x20 || p1 > 15)
break;
state->mat_color = Default_clut[state->clut_high_idx << 4 | p1];
set_color(ctx, 5, state->mat_color);
break;
// SSM (0x30..0x39,..., 0x3B, 0x30..0x39,..., 0x20, 0x57)
case 0x57:
q = get_csi_params(buf, &p1, &p2);
if (!q || *q != 0x20 || p1 != p2)
break;
if (p1 == 16 || p1 == 20 || p1 == 24 || p1 == 30 || p1 == 36) {
char fs[8];
ls->font_size = p1;
fs[0] = '\0';
av_strlcatf(fs, sizeof(fs), "{\\fs%d}", p1);
append_str(ctx, fs);
if (ls->line_width != 0)
ls->shift_baseline = 0;
mp_msg(MSGT_DECSUB, MSGL_DBG2, "font size:%d\n", p1);
}
break;
// SHS (0x30..0x39,..., 0x20, 0x58)
// SVS (0x30..0x39,..., 0x20, 0x59)
case 0x58:
case 0x59:
q = get_csi_params(buf, &p1, NULL);
if (!q || *q != 0x20)
break;
if (code == 0x58)
ls->cell_spacing.col = p1;
else {
ls->cell_spacing.row = p1;
if (ls->line_width != 0)
ls->shift_baseline = 0;
}
mp_msg(MSGT_DECSUB, MSGL_DBG2, "%c-spacing:%u.\n",
(code == 0x58) ? 'h' : 'v', p1);
// no output here. automatically inserted later in proc_char().
break;
// ORN (0x30, 0x20, 0x63) or (0x31, 0x3B, 0x30..0x39 * 4, 0x20, 0x63)
case 0x63:
q = get_csi_params(buf, &p1, &p2);
if (!q)
break;
if (p1 == 0) // no ornaments
append_str(ctx, "{\\bord0\\3a&HFF&\\3c&H000000&}");
else if (p1 == 1 && *q == 0x20) { // outline
int idx = (p2 / 100) << 4 | (p2 % 100);
if (idx < 0 || idx > 127)
break;
append_str(ctx, "{\\bord1}");
set_color(ctx, 3, Default_clut[idx]);
}
break;
// SCR (0x30..0x34, 0x3B, 0x30..0x39..., 0x20, 0x67)
case 0x67:
q = get_csi_params(buf, &p1, &p2);
if (!q || *q != 0x20 || p1 > 4)
break;
if (p1 != 0)
clear_text(ctx);
ls->scroll.direction = (p1 + 1) / 2;
ls->scroll.rollout = (p1 == 2 || p1 == 4);
if (p2 == 0)
p2 = 1;
ls->scroll.speed = p2;
break;
// ACPS (0x30..0x39,...., 0x3B, 0x30..0x39,...., 0x20, 0x61)
case 0x61:
q = get_csi_params(buf, &p1, &p2);
if (!q || *q != 0x20)
break;
if ((signed) p1 < ls->display_area.x
|| (signed) p1 >= ls->display_area.x + ls->display_area.w
|| (signed) p2 < ls->display_area.y
|| (signed) p2 >= ls->display_area.y + ls->display_area.h) {
mp_msg(MSGT_DECSUB, MSGL_WARN,
"invalid parameters (%u, %u) in ACPS.\n", p1, p2);
break;
}
if (IS_HORIZONTAL_LAYOUT(ls->format))
move_penpos(ctx, p1 - ls->display_area.x,
p2 - ls->display_area.y);
else
move_penpos(ctx, ls->display_area.w - (p1 - ls->display_area.x),
p2 - ls->display_area.y);
break;
// -CCC ({0x30, 0x32..0x34}, 0x20, 0x54)
// -PLD (0x5B)
// -PLU (0x5C)
// -GSM (0x30..0x39,..., 0x3B, 0x30..0x39,..., 0x20, 0x42)
// -GAA ({0x30,0x31}, 0x20, 0x5D)
// -SRC (0x30..0x33, 0x3B, 0x30..0x39 * 4, 0x20, 0x5E)
// -TCC (0x30..0x3A, 0x3B, 0x30..0x33, 0x3B, 0x30..0x39,..., 0x20, 0x62)
// -CFS (0x30..0x39,..., 0x20, 0x65)
// -MDF (0x30..0x33, 0x20, 0x64)
// -XCS (0x30..0x31, 0x20, 0x66)
// PRA (0x30..0x39,..., 0x20, 0x68)
// -ACS (0x30, 0x20, 0x69)....(0x9B, 0x31,0x20,0x69) {(0x9B,{0x32,0x34},0x20,0x69)...(0x9B,{0x33,0x35},0x20, 0x69)}+
// -SCS (0x6F)(0x9B,...).....(0x9B,0x6F)
default:
mp_msg(MSGT_DECSUB, MSGL_V,
"invalid/un-supported CSI. terminating code:0x%02hhx len:%d\n", buf[i], i);
}
i++;
buf += i;
buf_size -= i;
break;
// non-supported control codes
case 0x9D: // -TIME (0x20, 0x40..0x7F) or (0x28, 0x40..0x43) or (0x29, ...., 0x40..0x43)
if (buf_size < 2)
return NULL;
if (buf[0] == 0x20 || buf[0] == 0x28) {
if (buf[0] == 0x20) {
// wait. copy & split out as another event
ctx->duration = (buf[1] - 0x40) * 100;
append_event(ctx);
}
buf += 2;
buf_size -= 2;
break;
}
if (buf[0] != 0x29)
return NULL;
while (buf_size > 0 && (buf[0] & 0xFC) != 0x40) {
buf++;
buf_size--;
}
if ((buf[0] & 0xFC) != 0x40)
return NULL;
// buf_size > 0
buf++;
buf_size--;
mp_msg(MSGT_DECSUB, MSGL_V,
"un-supported control code: 0x%02hhx\n", code);
break;
case 0x95: // -MACRO (0x4F) or (0x40..0x41, 0x21..0x7E, ...., 0x95, 0x4F)
if (buf_size < 1)
return NULL;
if (buf[0] == 0x40 || buf[0] == 0x41) {
while (buf_size > 1 && buf[0] != 0x95) {
buf++;
buf_size--;
}
if (buf[0] != 0x95)
return NULL;
// buf_size > 1
buf++;
buf_size--;
}
if (buf[0] != 0x4F)
return NULL;
buf++;
buf_size--;
mp_msg(MSGT_DECSUB, MSGL_V,
"un-supported control code: 0x%02hhx\n", code);
break;
case 0x92: // -CDC ({0x40,0x4F}) or (0x20, 0x40..0x4A)
if (buf_size >= 2 && buf[0] == 0x20) {
buf++;
buf_size--;
}
// fall through
case 0x91: // -FLC ({0x40,0x47,0x4F})
case 0x93: // -POL (0x40..0x42)
case 0x94: // -WHM ({0x40,0x44,0x45})
case 0x97: // -HLC (0x40..0x4F)
if (buf_size < 1)
return NULL;
buf++;
buf_size--;
// fall through
case 0x08: // APB
case 0x0A: // APD
case 0x0B: // -APU
default: // un-used/unknown control codes
mp_msg(MSGT_DECSUB, MSGL_V,
"unknown/ignored control code: 0x%02hhx\n", code);
break;
}
}
return buf;
}
static const uint8_t * const combining1[] = {
"\xcc\x81", // U+0301 combinng accute accent
"\xcc\x80", // U+0300 combining grave accent
"\xcc\x88", // U+0308 combining diaeresis
"\xcc\x82", // U+0302 combining circumflex accent
"\xcc\x84", // U+0304 combining macron
"\xcc\xb2", // U+0332 combining low line
};
static const uint8_t * const hira_symbols[9] = {
"\xe3\x82\x9d" /* U+309D */, "\xe3\x82\x9e" /* U+309E */,
"\xe3\x83\xbc" /* U+30FC */, "\xe3\x80\x82" /* U+3002 */,
"\xe3\x80\x8c" /* U+300C */, "\xe3\x80\x8d" /* U+300D */,
"\xe3\x80\x81" /* U+3001 */, "\xe3\x83\xbb" /* U+30FB */,
};
static const uint8_t * const kata_symbols[9] = {
"\xe3\x83\xbd" /* U+30FD */, "\xe3\x83\xbe" /* U+30FE */,
"\xe3\x83\xbc" /* U+30FC */, "\xe3\x80\x82" /* U+3002 */,
"\xe3\x80\x8c" /* U+300C */, "\xe3\x80\x8d" /* U+300D */,
"\xe3\x80\x81" /* U+3001 */, "\xe3\x83\xbb" /* U+30FB */,
};
/* ARIB STD B24 table.7-11 -> UTF-8 */
static const uint8_t * const trans_ext85[] = {
/* 1- 10 */
"\xe3\x90\x82", "\xf0\xa0\x85\x98", "\xe4\xbb\xbd", "\xe4\xbb\xbf", "\xe4\xbe\x9a",
"\xe4\xbf\x89", "\xe5\x82\x9c", "\xe5\x84\x9e", "\xe5\x86\xbc", "\xe3\x94\x9f",
/* 11- 20 */
"\xe5\x8c\x87", "\xe5\x8d\xa1", "\xe5\x8d\xac", "\xe5\xa9\xb9", "\xf0\xa0\xae\xb7",
"\xe5\x91\x8d", "\xe5\x92\x96", "\xe5\x92\x9c", "\xe5\x92\xa9", "\xe5\x94\x8e",
/* 21- 30 */
"\xe5\x95\x8a", "\xe5\x99\xb2", "\xe5\x9b\xa4", "\xe5\x9c\xb3", "\xe5\x9c\xb4",
"\xef\xa8\x90", "\xe5\xa2\x80", "\xe5\xa7\xa4", "\xe5\xa8\xa3", "\xe5\xa9\x95",
/* 31- 40 */
"\xe5\xaf\xac", "\xef\xa8\x91", "\xe3\x9f\xa2", "\xe5\xba\xac", "\xe5\xbc\xb4",
"\xe5\xbd\x85", "\xe5\xbe\xb7", "\xe6\x80\x97", "\xe6\x81\xb5", "\xe6\x84\xb0",
/* 41- 50 */
"\xe6\x98\xa4", "\xe6\x9b\x88", "\xe6\x9b\x99", "\xe6\x9b\xba", "\xe6\x9b\xbb",
"\xee\x82\x88", "\xe6\xa2\x81", "\xe6\xa4\x91", "\xe6\xa4\xbb", "\xe6\xa9\x85",
/* 51- 60 */
"\xe6\xaa\x91", "\xe6\xab\x9b", "\xf0\xa3\x8f\x8c", "\xf0\xa3\x8f\xbe", "\xf0\xa3\x97\x84",
"\xe6\xaf\xb1", "\xe6\xb3\xa0", "\xe6\xb4\xae", "\xef\xa9\x85", "\xe6\xb6\xbf",
/* 61- 70 */
"\xe6\xb7\x8a", "\xe6\xb7\xb8", "\xef\xa9\x86", "\xe6\xbd\x9e", "\xe6\xbf\xb9",
"\xe7\x81\xa4", "\xe7\x85\x95", "\xf0\xa4\x8b\xae", "\xe7\x85\x87", "\xe7\x87\x81",
/* 71- 80 */
"\xe7\x88\x80", "\xe7\x8e\x9f", "\xe7\x8e\xa8", "\xe7\x8f\x89", "\xe7\x8f\x96",
"\xe7\x90\x9b", "\xe7\x90\xa1", "\xef\xa9\x8a", "\xe7\x90\xa6", "\xe7\x90\xaa",
/* 81- 90 */
"\xe7\x90\xac", "\xe7\x90\xb9", "\xe7\x91\x8b", "\xe3\xbb\x9a", "\xe7\x95\xb5",
"\xe7\x96\x81", "\xe7\x9d\xb2", "\xe4\x82\x93", "\xe7\xa3\x88", "\xe7\xa3\xa0",
/* 91-100 */
"\xe7\xa5\x87", "\xe7\xa6\xae", "\xe7\xa5\x93", "\xe8\xa2\x82", "\xe8\xa5\xa6",
"\xe7\xa7\x9a", "\xe7\xa8\x9e", "\xe7\xad\xbf", "\xe7\xb0\xb1", "\xe4\x89\xa4",
/* 101-110 */
"\xe7\xb6\x8b", "\xe7\xbe\xa1", "\xe8\x84\x98", "\xe8\x84\xba", "\xe8\x88\x98",
"\xe8\x8a\xae", "\xe8\x91\x9b", "\xe8\x93\x9c", "\xe8\x93\xac", "\xe8\x95\x99",
/* 111-120 */
"\xe8\x97\x8e", "\xe8\x9d\x95", "\xe8\x9f\xac", "\xe8\xa0\x8b", "\xe8\xa3\xb5",
"\xe8\xa7\x92", "\xe8\xab\xb6", "\xe8\xb7\x8e", "\xe8\xbe\xbb", "\xe8\xbf\xb6",
/* 121-130 */
"\xe9\x83\x9d", "\xe9\x84\xa7", "\xe9\x84\xad", "\xe9\x86\xb2", "\xe9\x88\xb3",
"\xe9\x8a\x88", "\xe9\x8c\xa1", "\xe9\x8d\x88", "\xe9\x96\x92", "\xe9\x9b\x9e",
/* 131-137 */
"\xe9\xa4\x83", "\xe9\xa5\x80", "\xe9\xab\x99", "\xe9\xaf\x96", "\xe9\xb7\x97",
"\xe9\xba\xb4", "\xe9\xba\xb5",
};
static const uint8_t * const trans_ext90[][96] = {
{ /* row 90 */
/* 1- 10 */
"\xe2\x9b\x8c", "\xe2\x9b\x8d", "\xe2\x9d\x97", "\xe2\x9b\x8f", "\xe2\x9b\x90",
"\xe2\x9b\x91", "", "\xe2\x9b\x92", "\xe2\x9b\x95", "\xe2\x9b\x93",
/* 11- 20 */
"\xe2\x9b\x94", "", "", "", "", "",
"\xf0\x9f\x85\xbf", "\xf0\x9f\x86\x8a", "", "", "\xe2\x9b\x96",
/* 21- 30 */
"\xe2\x9b\x97", "\xe2\x9b\x98", "\xe2\x9b\x99", "\xe2\x9b\x9a", "\xe2\x9b\x9b",
"\xe2\x9b\x9c", "\xe2\x9b\x9d", "\xe2\x9b\x9e", "\xe2\x9b\x9f", "\xe2\x9b\xa0",
/* 31- 40 */
"\xe2\x9b\xa1", "\xe2\xad\x95", "\xe3\x89\x88", "\xe3\x89\x89", "\xe3\x89\x8a",
"\xe3\x89\x8b", "\xe3\x89\x8c", "\xe3\x89\x8d", "\xe3\x89\x8e", "\xe3\x89\x8f",
/* 41- 50 */
"", "", "", "", "\xe2\x92\x91",
"\xe2\x92\x92", "\xe2\x92\x93", "\xf0\x9f\x85\x8a", "\xf0\x9f\x85\x8c", "\xf0\x9f\x84\xbf",
/* 51- 60 */
"\xf0\x9f\x85\x86", "\xf0\x9f\x85\x8b", "\xf0\x9f\x88\x90", "\xf0\x9f\x88\x91", "\xf0\x9f\x88\x92",
"\xf0\x9f\x88\x93", "\xf0\x9f\x85\x82", "\xf0\x9f\x88\x94", "\xf0\x9f\x88\x95", "\xf0\x9f\x88\x96",
/* 61- 70 */
"\xf0\x9f\x85\x8d", "\xf0\x9f\x84\xb1", "\xf0\x9f\x84\xbd", "\xe2\xac\x9b", "\xe2\xac\xa4",
"\xf0\x9f\x88\x97", "\xf0\x9f\x88\x98", "\xf0\x9f\x88\x99", "\xf0\x9f\x88\x9a", "\xf0\x9f\x88\x9b",
/* 71- 80 */
"\xe2\x9a\xbf", "\xf0\x9f\x88\x9c", "\xf0\x9f\x88\x9d", "\xf0\x9f\x88\x9e", "\xf0\x9f\x88\x9f",
"\xf0\x9f\x88\xa0", "\xf0\x9f\x88\xa1", "\xf0\x9f\x88\xa2", "\xf0\x9f\x88\xa3", "\xf0\x9f\x88\xa4",
/* 81- 84 */
"\xf0\x9f\x88\xa5", "\xf0\x9f\x85\x8e", "\xe3\x8a\x99", "\xf0\x9f\x88\x80",
},
{ /* row 91 */
/* 1- 10 */
"\xe2\x9b\xa3", "\xe2\xad\x96", "\xe2\xad\x97", "\xe2\xad\x98", "\xe2\xad\x99",
"\xe2\x98\x93", "\xe3\x8a\x8b", "\xe3\x80\x92", "\xe2\x9b\xa8", "\xe3\x89\x86",
/* 11- 20 */
"\xe3\x89\x85", "\xe2\x9b\xa9", "\xe0\xbf\x96", "\xe2\x9b\xaa", "\xe2\x9b\xab",
"\xe2\x9b\xac", "\xe2\x99\xa8", "\xe2\x9b\xad", "\xe2\x9b\xae", "\xe2\x9b\xaf",
/* 21- 30 */
"\xe2\x9a\x93", "\xe2\x9c\x88", "\xe2\x9b\xb0", "\xe2\x9b\xb1", "\xe2\x9b\xb2",
"\xe2\x9b\xb3", "\xe2\x9b\xb4", "\xe2\x9b\xb5", "\xf0\x9f\x85\x97", "\xe2\x92\xb9",
/* 31- 40 */
"\xe2\x93\x88", "\xe2\x9b\xb6", "\xf0\x9f\x85\x9f", "\xf0\x9f\x86\x8b", "\xf0\x9f\x86\x8d",
"\xf0\x9f\x86\x8c", "\xf0\x9f\x85\xb9", "\xe2\x9b\xb7", "\xe2\x9b\xb8", "\xe2\x9b\xb9",
/* 41- 49 */
"\xe2\x9b\xba", "\xf0\x9f\x85\xbb", "\xe2\x98\x8e", "\xe2\x9b\xbb", "\xe2\x9b\xbc",
"\xe2\x9b\xbd", "\xe2\x9b\xbe", "\xf0\x9f\x85\xbc", "\xe2\x9b\xbf",
},
{ /* row 92 */
/* 1- 10 */
"\xe2\x9e\xa1", "\xe2\xac\x85", "\xe2\xac\x86", "\xe2\xac\x87", "\xe2\xac\xaf",
"\xe2\xac\xae", "\xe5\xb9\xb4", "\xe6\x9c\x88", "\xe6\x97\xa5", "\xe5\x86\x86",
/* 11- 20 */
"\xe3\x8e\xa1", "\xe3\x8e\xa5", "\xe3\x8e\x9d", "\xe3\x8e\xa0", "\xe3\x8e\xa4",
"\xf0\x9f\x84\x80", "\xe2\x92\x88", "\xe2\x92\x89", "\xe2\x92\x8a", "\xe2\x92\x8b",
/* 21- 30 */
"\xe2\x92\x8c", "\xe2\x92\x8d", "\xe2\x92\x8e", "\xe2\x92\x8f", "\xe2\x92\x90",
"\xe6\xb0\x8f", "\xe5\x89\xaf", "\xe5\x85\x83", "\xe6\x95\x85", "\xe5\x89\x8d",
/* 31- 40 */
"\xe6\x96\xb0", "\xf0\x9f\x84\x81", "\xf0\x9f\x84\x82", "\xf0\x9f\x84\x83", "\xf0\x9f\x84\x84",
"\xf0\x9f\x84\x85", "\xf0\x9f\x84\x86", "\xf0\x9f\x84\x87", "\xf0\x9f\x84\x88", "\xf0\x9f\x84\x89",
/* 41- 50 */
"\xf0\x9f\x84\x8a", "\xe3\x88\xb3", "\xe3\x88\xb6", "\xe3\x88\xb2", "\xe3\x88\xb1",
"\xe3\x88\xb9", "\xe3\x89\x84", "\xe2\x96\xb6", "\xe2\x97\x80", "\xe3\x80\x96",
/* 51- 60 */
"\xe3\x80\x97", "\xe2\x9f\x90", "\xc2\xb2", "\xc2\xb3", "\xf0\x9f\x84\xad",
/* no unicode chars for musical score symbols (56...85) */
"?", "?", "?", "?", "?",
/* 61- 70 */
"?", "?", "?", "?", "?", "?", "?", "?", "?", "?",
/* 71- 80 */
"?", "?", "?", "?", "?", "?", "?", "?", "?", "?",
/* 81- 90 */
"?", "?", "?", "?", "?",
"\xf0\x9f\x84\xac", "\xf0\x9f\x84\xab", "\xe3\x89\x87", "\xf0\x9f\x86\x90", "\xf0\x9f\x88\xa6",
/* 91 */
"\xe2\x84\xbb",
},
{ /* row 93 */
/* 1- 10 */
"\xe3\x88\xaa", "\xe3\x88\xab", "\xe3\x88\xac", "\xe3\x88\xad", "\xe3\x88\xae",
"\xe3\x88\xaf", "\xe3\x88\xb0", "\xe3\x88\xb7", "\xe3\x8d\xbe", "\xe3\x8d\xbd",
/* 11- 20 */
"\xe3\x8d\xbc", "\xe3\x8d\xbb", "\xe2\x84\x96", "\xe2\x84\xa1", "\xe3\x80\xb6",
"\xe2\x9a\xbe", "\xf0\x9f\x89\x80", "\xf0\x9f\x89\x81", "\xf0\x9f\x89\x82", "\xf0\x9f\x89\x83",
/* 21- 30 */
"\xf0\x9f\x89\x84", "\xf0\x9f\x89\x85", "\xf0\x9f\x89\x86", "\xf0\x9f\x89\x87", "\xf0\x9f\x89\x88",
"\xf0\x9f\x84\xaa", "\xf0\x9f\x88\xa7", "\xf0\x9f\x88\xa8", "\xf0\x9f\x88\xa9", "\xf0\x9f\x88\x94",
/* 31- 40 */
"\xf0\x9f\x88\xaa", "\xf0\x9f\x88\xab", "\xf0\x9f\x88\xac", "\xf0\x9f\x88\xad", "\xf0\x9f\x88\xae",
"\xf0\x9f\x88\xaf", "\xf0\x9f\x88\xb0", "\xf0\x9f\x88\xb1", "\xe2\x84\x93", "\xe3\x8e\x8f",
/* 41- 50 */
"\xe3\x8e\x90", "\xe3\x8f\x8a", "\xe3\x8e\x9e", "\xe3\x8e\xa2", "\xe3\x8d\xb1",
"", "", "\xc2\xbd", "\xe2\x86\x89", "\xe2\x85\x93",
/* 51- 60 */
"\xe2\x85\x94", "\xc2\xbc", "\xc2\xbe", "\xe2\x85\x95", "\xe2\x85\x96",
"\xe2\x85\x97", "\xe2\x85\x98", "\xe2\x85\x99", "\xe2\x85\x9a", "\xe2\x85\x90",
/* 61- 70 */
"\xe2\x85\x9b", "\xe2\x85\x91", "\xe2\x85\x92", "\xe2\x98\x80", "\xe2\x98\x81",
"\xe2\x98\x82", "\xe2\x9b\x84", "\xe2\x98\x96", "\xe2\x98\x97", "\xe2\x9b\x89",
/* 71- 80 */
"\xe2\x9b\x8a", "\xe2\x99\xa6", "\xe2\x99\xa5", "\xe2\x99\xa3","\xe2\x99\xa0",
"\xe2\x9b\x8b", "\xe2\xa8\x80", "\xe2\x80\xbc", "\xe2\x81\x89", "\xe2\x9b\x85",
/* 81- 90 */
"\xe2\x98\x94", "\xe2\x9b\x86", "\xe2\x98\x83", "\xe2\x9b\x87", "\xe2\x9a\xa1",
"\xe2\x9b\x87", "", "\xe2\x9a\x9e", "\xe2\x9a\x9f", "\xe2\x99\xac",
/* 91 */
"\xe2\x98\x8e",
},
{ /* row 94 */
/* 1- 10 */
"\xe2\x85\xa0", "\xe2\x85\xa1", "\xe2\x85\xa2", "\xe2\x85\xa3", "\xe2\x85\xa4",
"\xe2\x85\xa5", "\xe2\x85\xa6", "\xe2\x85\xa7", "\xe2\x85\xa8", "\xe2\x85\xa9",
/* 11- 20 */
"\xe2\x85\xaa", "\xe2\x85\xab", "\xe2\x91\xb0", "\xe2\x91\xb1", "\xe2\x91\xb2",
"\xe2\x91\xb3", "\xe2\x91\xb4", "\xe2\x91\xb5", "\xe2\x91\xb6", "\xe2\x91\xb7",
/* 21- 30 */
"\xe2\x91\xb8", "\xe2\x91\xb9", "\xe2\x91\xba", "\xe2\x91\xbb", "\xe2\x91\xbc",
"\xe2\x91\xbd", "\xe2\x91\xbe", "\xe2\x91\xbf", "\xe3\x89\x91", "\xe3\x89\x92",
/* 31- 40 */
"\xe3\x89\x93", "\xe3\x89\x94", "\xf0\x9f\x84\x90", "\xf0\x9f\x84\x91", "\xf0\x9f\x84\x92",
"\xf0\x9f\x84\x93", "\xf0\x9f\x84\x94", "\xf0\x9f\x84\x95", "\xf0\x9f\x84\x96", "\xf0\x9f\x84\x97",
/* 41- 50 */
"\xf0\x9f\x84\x98", "\xf0\x9f\x84\x99", "\xf0\x9f\x84\x9a", "\xf0\x9f\x84\x9b", "\xf0\x9f\x84\x9c",
"\xf0\x9f\x84\x9d", "\xf0\x9f\x84\x9e", "\xf0\x9f\x84\x9f", "\xf0\x9f\x84\xa0", "\xf0\x9f\x84\xa1",
/* 51- 60 */
"\xf0\x9f\x84\xa2", "\xf0\x9f\x84\xa3", "\xf0\x9f\x84\xa4", "\xf0\x9f\x84\xa5", "\xf0\x9f\x84\xa6",
"\xf0\x9f\x84\xa7", "\xf0\x9f\x84\xa8", "\xf0\x9f\x84\xa9", "\xe3\x89\x95", "\xe3\x89\x96",
/* 61- 70 */
"\xe3\x89\x97", "\xe3\x89\x98", "\xe3\x89\x99", "\xe3\x89\x9a", "\xe2\x91\xa0",
"\xe2\x91\xa1", "\xe2\x91\xa2", "\xe2\x91\xa3", "\xe2\x91\xa4", "\xe2\x91\xa5",
/* 71- 80 */
"\xe2\x91\xa6", "\xe2\x91\xa7", "\xe2\x91\xa8", "\xe2\x91\xa9", "\xe2\x91\xaa",
"\xe2\x91\xab", "\xe2\x91\xac", "\xe2\x91\xad", "\xe2\x91\xae", "\xe2\x91\xaf",
/* 81- 90 */
"\xe2\x9d\xb6", "\xe2\x9d\xb7", "\xe2\x9d\xb8", "\xe2\x9d\xb9", "\xe2\x9d\xba",
"\xe2\x9d\xbb", "\xe2\x9d\xbc", "\xe2\x9d\xbd", "\xe2\x9d\xbe", "\xe2\x9d\xbf",
/* 91- 93 */
"\xe2\x93\xab", "\xe2\x93\xac", "\xe3\x89\x9b",
}
};
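// Decode one character (c1 plus, for multi-byte sets, c2) according to the
// currently invoked G-set (GL/GR or a single shift) and append its UTF-8
// form to ctx->text. Kanji sets go through iconv (EUC-JISX0213 -> UTF-8);
// the return value is the charset code so that the caller can post-process
// macro and combining sequences.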
static unsigned char append_arib_char(ISDBSubContext *ctx, uint8_t c1, uint8_t c2)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
struct b24str_state *ts = &ctx->current_state.text_state;
int gidx;
uint8_t code;
size_t ilen, olen, ret;
char indata[3], *obuf, *ibuf;
const uint8_t *p;
uint8_t *q;
int len;
ibuf = indata;
// make room for a new char. (at least 6 + 1 bytes)
reserve_buf(ctx, 7);
obuf = ctx->text.buf + ctx->text.used;
olen = ctx->text.len - ctx->text.used;
if (ts->ss > 1)
gidx = ts->ss;
else if (c1 & 0x80)
gidx = ts->gr;
else
gidx = ts->gl;
code = ts->g[gidx].code;
// some hacks
if ((c1 & 0x7F) == 0x20 || (c1 & 0x7F) == 0x7F) // SP or DEL
code = CODE_ASCII;
if (code == CODE_JISX0208 && (c1 & 0x7F) >= 0x75)
code = CODE_EXT;
switch (code) {
case CODE_ASCII:
case CODE_ASCII2:
ls->in_combining = 0;
c1 &= 0x7F;
if (c1 != 0x7F)
ctx->text.buf[ctx->text.used++] = c1; // no conversion needed
else {
            // replace DEL with a black square (U+25A0)
memcpy(ctx->text.buf + ctx->text.used, "\xe2\x96\xa0", 3);
ctx->text.used += 3;
}
break;
case CODE_JISX0208:
case CODE_JISX0213_1:
c1 |= 0x80;
c2 |= 0x80;
// non-spacing char?
if ((c1 == 0xA1 && c2 >= 0xAD && c2 <= 0xB2) ||
(c1 == 0xA2 && c2 == 0xFE)) {
ls->in_combining = 1;
// U+20DD (0xe2839d) combining enclosing circle
p = (c1 == 0xA1) ? combining1[c2 - 0xAD] :
(const uint8_t *) "\xe2\x83\x9d";
len = strlen(p);
memcpy(ctx->text.buf + ctx->text.used, p, len);
ctx->text.used += len;
break;
}
ls->in_combining = 0;
ibuf[0] = c1;
        ibuf[1] = c2; // ibuf[]: EUC-JISX0213 kanji
ilen = 2;
ret = iconv(ctx->iconv, &ibuf, &ilen, &obuf, &olen);
if (ret != (size_t) -1)
ctx->text.used += obuf - &ctx->text.buf[ctx->text.used];
else
mp_msg(MSGT_DECSUB, MSGL_INFO,
"bad data as EUC-JP(%d). sp:%lu 0x%02hhx 0x%02hhx\n",
errno, olen, c1, c2);
break;
case CODE_JISX0213_2:
ls->in_combining = 0;
c1 |= 0x80;
c2 |= 0x80;
        ilen = 3;
        ibuf[0] = '\x8f';
        ibuf[1] = c1;
        ibuf[2] = c2;
ret = iconv(ctx->iconv, &ibuf, &ilen, &obuf, &olen);
if (ret != (size_t) -1)
ctx->text.used += obuf - &ctx->text.buf[ctx->text.used];
else
mp_msg(MSGT_DECSUB, MSGL_INFO,
"bad data as EUC-JP. 0x8f 0x%02hhx 0x%02hhx\n", c1, c2);
break;
case CODE_JISX0201_KATA:
ls->in_combining = 0;
c1 &= 0x7F;
// map to U+FF60 + (c1 - 0x20) = U+FF00 + (c1 + 0x40)
c1 += 0x40;
q = ctx->text.buf + ctx->text.used;
q[0] = 0xef;
q[1] = 0xbc + (c1 >> 6);
q[2] = 0x80 + (c1 & 0x3F);
ctx->text.used += 3;
break;
case CODE_X_HIRA:
case CODE_X_HIRA_P:
case CODE_X_KATA:
case CODE_X_KATA_P:
ls->in_combining = 0;
c1 &= 0x7F;
if (c1 < 0x77) {
q = ctx->text.buf + ctx->text.used;
// hira: {U+3040+ (c1 - 0x20)} -> {U+3000 + (c1 + 0x20)}
// kata: {U+30A0 + (c1 - 0x20)} -> {U+3000 + (c1 + 0x80)}
c1 += (code == CODE_X_HIRA || code == CODE_X_HIRA_P) ? 0x20 : 0x80;
q[0] = 0xe3;
q[1] = 0x80 + (c1 >> 6);
q[2] = 0x80 + (c1 & 0x3F);
ctx->text.used += 3;
} else {
c1 -= 0x77;
p = (code == CODE_X_HIRA || code == CODE_X_HIRA_P) ?
hira_symbols[c1] : kata_symbols[c1];
len = strlen(p);
memcpy(ctx->text.buf + ctx->text.used, p, len);
ctx->text.used += len;
}
break;
case CODE_EXT: // symbols defined in row 85.86, 90..94
ls->in_combining = 0;
c1 &= 0x7F;
c2 &= 0x7F;
switch (c1) {
case 0x75: // row 85-86
case 0x76:
c2 -= 0x21;
if (c1 == 0x76)
c2 += 94;
if (c2 < 137) {
p = trans_ext85[c2];
len = strlen(p);
memcpy(ctx->text.buf + ctx->text.used, p, len);
ctx->text.used += len;
} else
mp_msg(MSGT_DECSUB, MSGL_INFO, "bad data as extra char.\n");
break;
case 0x7A: // row 90-94
case 0x7B:
case 0x7C:
case 0x7D:
case 0x7E:
c1 -= 0x7A;
c2 -= 0x21;
p = trans_ext90[c1][c2];
len = strlen(p);
if (len > 0) {
memcpy(ctx->text.buf + ctx->text.used, p, len);
ctx->text.used += len;
} else
mp_msg(MSGT_DECSUB, MSGL_INFO, "bad data as extra char.\n");
break;
default:
mp_msg(MSGT_DECSUB, MSGL_V,
"un-supported data 0x%02hhx 0x%02hhx.\n", c1, c2);
}
break;
// non-spacing mosaic chars... just skip.
    // don't touch in_combining, as other non-spacing chars may follow/precede...
case CODE_MOSAIC_C:
case CODE_MOSAIC_D:
break;
case CODE_X_MACRO:
break; // processed later
default:
ls->in_combining = 0;
// replace this unknown char with U+FFFD
memcpy(ctx->text.buf + ctx->text.used, "\xef\xbf\xbd", 3);
ctx->text.used += 3;
mp_msg(MSGT_DECSUB, MSGL_V,
"unknown charset:0x%02hhx data 0x%02hhx 0x%02hhx.\n", code, c1, c2);
break;
}
ctx->text.buf[ctx->text.used] = '\0';
return code;
}
static void insert_str(ISDBSubContext *ctx, const char *txt, int begin)
{
int end = ctx->text.used;
size_t len = strlen(txt);
if (len == 0 || len > 128)
return;
reserve_buf(ctx, len + 1); // +1 for terminating '\0'
memmove(ctx->text.buf + begin + len, ctx->text.buf + begin, end - begin);
memcpy(ctx->text.buf + begin, txt, len);
ctx->text.txt_tail += len;
ctx->text.used += len;
ctx->text.buf[ctx->text.used] = '\0';
}
static void advance(ISDBSubContext *ctx);
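// Now that the metrics of the just-finished line are known, patch its
// upper line spacing ({\lsp...}) in at the position of the previous line
// break. If the whole line consisted of half-height (fscy 50%) characters,
// shift the baseline down by a quarter of the row spacing first.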
static void fixup_linesep(ISDBSubContext *ctx)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
char tmp[16];
int lsp;
if (ls->prev_break_idx <= 0)
return;
// adjust baseline if all chars in the line are 50% tall of one font size.
if (ls->shift_baseline && IS_HORIZONTAL_LAYOUT(ls->format)) {
int delta = ls->cell_spacing.row / 4;
ls->linesep_upper += delta;
ls->line_desc -= delta;
mp_msg(MSGT_DECSUB, MSGL_V, "baseline shifted down %dpx.\n", delta);
}
// not the first line
    tmp[0] = '\0';
lsp = ls->prev_line_desc + ls->linesep_upper;
av_strlcatf(tmp, sizeof(tmp), "{\\lsp%d}", lsp);
insert_str(ctx, tmp, ls->prev_break_idx);
}
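// Close the current line: pad an otherwise empty line with an ideographic
// space (ASS halves the spacing of empty lines), fix up the spacing of the
// line just closed, emit "\N" and reset the per-line layout counters.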
static void do_line_break(ISDBSubContext *ctx)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
int csp;
char tmp[32];
if (IS_HORIZONTAL_LAYOUT(ls->format))
csp = ls->cell_spacing.col * ls->font_scale.fscx / 100;
else
csp = ls->cell_spacing.col * ls->font_scale.fscy / 100;
if (ls->line_width == 0) {
if (ls->prev_line_bottom == 0) {
if (IS_HORIZONTAL_LAYOUT(ls->format))
ls->block_offset_h = csp / 2;
else
ls->block_offset_v = csp / 2;
}
// avoid empty lines by prepending a space,
// as ASS halves line-spacing of empty lines.
append_str(ctx, "\xe3\x80\x80");
advance(ctx);
}
fixup_linesep(ctx);
ls->prev_break_idx = ctx->text.used;
tmp[0] = '\0';
av_strlcatf(tmp, sizeof(tmp), "\\N{\\lsp0\\fsp%d}", ls->cell_spacing.col);
append_str(ctx, tmp);
ls->prev_line_desc = ls->line_desc;
ls->prev_line_bottom += ls->linesep_upper + ls->line_height + ls->line_desc;
ls->prev_char_sep = csp;
ls->line_height = 0;
ls->line_width = 0;
ls->line_desc = 0;
ls->linesep_upper = 0;
ls->shift_baseline = 0;
}
// a new char will be appended. check h-spacing and line-breaking.
static void pre_advance(ISDBSubContext *ctx)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
int csp, cscale;
int w;
int blocklen;
int delta;
if (IS_HORIZONTAL_LAYOUT(ls->format)) {
cscale = ls->font_scale.fscx;
w = ls->font_size * cscale / 100;
csp = ls->cell_spacing.col * cscale / 100;
if (ls->prev_line_bottom == 0 && ls->line_width == 0)
ls->block_offset_h = csp / 2;
blocklen = ls->display_area.w - ls->block_offset_h;
} else {
cscale = ls->font_scale.fscy;
w = ls->font_size * cscale / 100;
csp = ls->cell_spacing.col * cscale / 100;
if (ls->prev_line_bottom == 0 && ls->line_width == 0)
ls->block_offset_v = csp / 2;
blocklen = ls->display_area.h - ls->block_offset_v;
}
if (csp != ls->prev_char_sep)
delta = ((ls->prev_char_sep + 1) / 2 + csp / 2) - ls->prev_char_sep;
else
delta = 0;
// check line break;
// note: at the head of a line, line breaking is useless.
if (ls->line_width != 0 &&
ls->line_width + delta + w > blocklen) {
mp_msg(MSGT_DECSUB, MSGL_V, "auto line break at %lu.\n",
ctx->text.used);
do_line_break(ctx);
} else if (ls->line_width != 0 && delta != 0) {
char tmp[32];
// need to compensate the wrong charsep of the previously added char.
// hack: insert zero-width space with the compensating \fsp. (can be <0)
// note that \fsp is scaled by \fscx in ASS.
tmp[0] = '\0';
av_strlcatf(tmp, sizeof(tmp), "{\\fsp%d}\xe2\x80\x8b{\\fsp%d}",
delta * 100 / cscale, ls->cell_spacing.col);
append_str(ctx, tmp);
ls->line_width += delta;
}
}
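// Account for the character that has just been appended: update the current
// line's width, height, upper separation and descent. In vertical layout the
// baseline effectively runs through the middle of the cell, so ascent and
// descent are each roughly half of the (scaled) cell size.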
static void advance(ISDBSubContext *ctx)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
int cscale;
int h;
int asc, desc;
int csp;
if (IS_HORIZONTAL_LAYOUT(ls->format)) {
cscale = ls->font_scale.fscx;
h = ls->font_size * ls->font_scale.fscy / 100;
if (ls->font_scale.fscy == 200) {
desc = ls->cell_spacing.row / 2;
asc = ls->cell_spacing.row * 2 - desc + h;
} else {
desc = ls->cell_spacing.row * ls->font_scale.fscy / 200;
asc = ls->cell_spacing.row * ls->font_scale.fscy / 100 - desc + h;
}
if (asc > ls->line_height + ls->linesep_upper) {
if (h > ls->line_height)
ls->line_height = h;
ls->linesep_upper = asc - ls->line_height;
} else if (h > ls->line_height) {
ls->linesep_upper = ls->line_height + ls->linesep_upper - h;
ls->line_height = h;
}
if (ls->prev_line_bottom == 0 && ls->linesep_upper > ls->block_offset_v)
ls->block_offset_v = ls->linesep_upper;
if (ls->font_scale.fscy != 50)
ls->shift_baseline = 0;
} else {
int lsp;
cscale = ls->font_scale.fscy;
h = ls->font_size * ls->font_scale.fscx / 100;
lsp = ls->cell_spacing.row * ls->font_scale.fscx / 100;
desc = h / 2 + lsp / 2;
asc = h - h / 2 + lsp - lsp / 2;
if (asc > ls->line_height + ls->linesep_upper) {
if (h - h / 2 > ls->line_height)
ls->line_height = h - h / 2;
ls->linesep_upper = asc - ls->line_height;
} else if (h - h / 2 > ls->line_height) {
ls->linesep_upper = ls->line_height + ls->linesep_upper - h + h / 2;
ls->line_height = h - h / 2;
}
if (ls->prev_line_bottom == 0 && ls->linesep_upper > ls->block_offset_h)
ls->block_offset_h = ls->linesep_upper;
ls->shift_baseline = 0;
}
if (desc > ls->line_desc)
ls->line_desc = desc;
csp = ls->cell_spacing.col * cscale / 100;
ls->line_width += ls->font_size * cscale / 100 + csp;
ls->prev_char_sep = csp;
}
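// Move the cursor forward by whole rows/columns: rows become explicit line
// breaks, columns are padded with ideographic spaces.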
static void forward_position(ISDBSubContext *ctx, int rows, int cols)
{
int i;
for (i = 0; i < rows; i++)
do_line_break(ctx);
for (i = 0; i < cols; i++) {
pre_advance(ctx);
append_str(ctx, "\xe3\x80\x80");
advance(ctx);
}
if (rows > 0 && cols > 0)
ctx->current_state.layout_state.shift_baseline = 1;
}
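// Consume printable bytes until the next control code (or until the buffer
// is exhausted, in which case NULL is returned), appending each decoded
// character and keeping the layout bookkeeping (pre_advance/advance) in
// sync. Also expands macro characters and applies a pending repeat count.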
static const uint8_t *proc_char(ISDBSubContext *ctx,
const uint8_t *buf, int buf_size)
{
struct isdbsub_layout *ls = &ctx->current_state.layout_state;
struct b24str_state *ts = &ctx->current_state.text_state;
unsigned int begin, end;
char *ch = NULL;
begin = end = ctx->text.used;
while (buf_size > 0 && (buf[0] & 0x60) != 0) {
uint8_t c1, c2 = 0;
unsigned char ctype;
c1 = buf[0];
buf ++;
buf_size --;
if ((c1 & 0x7F) != 0x20 && (c1 & 0x7F) != 0x7F) { // not (SP or DEL)
// multi-byte?
if ((ts->ss > 0 && ts->g[ts->ss].mb > 1)
|| (!ts->ss && (c1 & 0x80) && ts->g[ts->gr].mb > 1)
|| (!ts->ss && !(c1 & 0x80) && ts->g[ts->gl].mb > 1)) {
if (buf_size < 1)
return NULL;
c2 = buf[0];
buf ++;
buf_size --;
}
}
if (!ls->in_combining) {
pre_advance(ctx);
begin = end = ctx->text.used;
}
ctype = append_arib_char(ctx, c1, c2);
ts->ss = 0;
if (ctype == 0)
continue;
if (ctype == CODE_X_MACRO) {
mp_msg(MSGT_DECSUB, MSGL_DBG2, "macro 0x%02hhx.\n", c1);
if ((c1 & 0x70) == 0x60) {
c1 &= 0x0F;
proc_ctl(ctx, Default_macro[c1], strlen(Default_macro[c1]));
}
continue;
}
// if non-spacing sequence has terminated,..
if (!ls->in_combining && end - begin > 0 && ctx->text.used > end) {
            char tmp[8]; // a UTF-8 char is 6 bytes at maximum
unsigned int len;
len = ctx->text.used - end;
if (len > sizeof(tmp)) // for safety.
len = sizeof(tmp);
memcpy(tmp, ctx->text.buf + end, len);
memmove(ctx->text.buf + begin + len, ctx->text.buf + begin, end - begin);
memcpy(ctx->text.buf + begin, tmp, len);
mp_msg(MSGT_DECSUB, MSGL_V,
"moved the terminating spacing char to the head(%d).\n", begin);
}
end = ctx->text.used;
if (ls->in_combining)
continue;
// ls->in_combining == 0
advance(ctx);
// repeat
if (ls->repeat_count >= 0) {
// save the repeating char first.
ch = calloc(1, end - begin + 1);
if (ch)
memcpy(ch, ctx->text.buf + begin, end - begin);
else {
mp_msg(MSGT_DECSUB, MSGL_WARN,
"out of memory for repeating char.\n");
ls->repeat_count = -1;
}
}
if (ls->repeat_count == 0) {
pre_advance(ctx);
while (ls->line_width != 0) {
append_str(ctx, ch);
advance(ctx);
// prepare for the next one.
pre_advance(ctx);
}
} else if (ls->repeat_count > 0) {
while (--ls->repeat_count > 0) {
pre_advance(ctx);
append_str(ctx, ch);
advance(ctx);
}
}
if (ls->repeat_count > -1)
free(ch);
ls->repeat_count = -1;
}
if (buf_size <= 0)
return NULL;
// buf_size > 0 => assert((buf[0] & 0x60) == 0);
return buf;
}
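// Process one text data unit: an alternating sequence of control sequences
// and character data, fed through proc_ctl() and proc_char() until empty.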
static void process_txt_du(ISDBSubContext *ctx, const uint8_t *buf,
int buf_size)
{
const char *p, *q;
ctx->current_state = ctx->default_states[ctx->lang_tag];
reset_state(&ctx->current_state);
iconv(ctx->iconv, NULL, NULL, NULL, NULL);
p = buf;
while (buf_size > 0) {
q = proc_ctl(ctx, p, buf_size); // update ctx->current_state
if (!q)
break;
buf_size -= q - p;
p = q;
q = proc_char(ctx, p, buf_size); // append to ctx->text
ctx->text.txt_tail = ctx->text.used;
if (!q)
break;
buf_size -= q - p;
p = q;
if (ctx->current_state.need_init)
setup_line_head(ctx);
}
}
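// Parse a subtitle management data group: time control mode, per-language
// display mode and display format, followed by a data-unit loop from which
// only the control sequences are used.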
static void process_mngmnt_dg(ISDBSubContext *ctx, const uint8_t *buf,
int buf_size)
{
const uint8_t *p = buf;
int lang_num;
int du_loop_len;
int i;
ctx->timing_mode = *(p++) >> 6;
if (ctx->timing_mode == ISDBSUB_TMD_OFFSET) {
ctx->offset.hour = (p[0] >> 4) * 10 + (p[0] & 0x0f);
ctx->offset.min = (p[1] >> 4) * 10 + (p[1] & 0x0f);
ctx->offset.sec = (p[2] >> 4) * 10 + (p[2] & 0x0f);
ctx->offset.ms = (p[3] >> 4) * 100 + (p[3] & 0x0f) * 10 + (p[4] >> 4);
p += 5;
}
lang_num = *(p++);
for (i = 0; i < lang_num; i++) {
int lang_tag = *p >> 5;
int disp_mode = *p & 0x0f;
p++;
if ((disp_mode & 0x0c) == 0x0c)
p++; // skip display condition
p += 3; // skip lang code
if (lang_tag < ISDBSUB_MAX_LANG) {
ISDBSubState *state = &ctx->default_states[lang_tag];
int format;
state->auto_display = disp_mode >> 3;
state->rollup_mode = (*p & 0x03);
format = *p >> 4;
state->layout_state.is_profile_c = (format == 0x0F);
if (state->layout_state.is_profile_c)
format = ISDBSUB_FMT_960H;
if (format < ISDBSUB_FMT_960H || format > ISDBSUB_FMT_720V) {
mp_msg(MSGT_DECSUB, MSGL_INFO,
"illegal format:0x%02x in sub. mngment data.\n", format);
format = ISDBSUB_FMT_960H; // fallback
}
state->layout_state.format = format;
if ((*p & 0x0c) != 0x00)
mp_msg(MSGT_DECSUB, MSGL_WARN,
"char encoding:%d not supported.\n", ((*p & 0x0c) >> 2));
init_layout(&state->layout_state);
reset_state(state);
clear_text(ctx);
}
p++;
}
ctx->current_state = ctx->default_states[ctx->lang_tag];
set_format(ctx);
du_loop_len = AV_RB24(p);
p += 3;
if (p + du_loop_len > buf + buf_size)
du_loop_len = buf + buf_size - p;
while (du_loop_len >= 5) {
int du_size = AV_RB24(p + 2);
if (p[0] != ISDBSUB_UNIT_SEP || du_loop_len < 5 + du_size)
break;
du_loop_len -= (5 + du_size);
// ignore DRCS. only txt data units.
if (p[1] == ISDBSUB_DU_TYPE_TXT) {
memdump(MSGL_DBG2, p + 5, du_size);
process_txt_du(ctx, p + 5, du_size); // control seq. only
// copy back SWF, SDP, SDF,...
ctx->default_states[ctx->lang_tag] = ctx->current_state;
}
p += 5 + du_size;
}
}
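// Parse a subtitle statement data group: optional presentation start time
// (plus the offset from the management data), the contained text data
// units, and finally queue the decoded text as an event.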
static void process_sub_dg(ISDBSubContext *ctx, const uint8_t *buf,
int buf_size)
{
const uint8_t *p = buf;
int du_loop_len;
if (ctx->pts == AV_NOPTS_VALUE) {
mp_msg(MSGT_DECSUB, MSGL_INFO, "no timestamp on subtitle text data.");
return;
}
clear_text(ctx);
setup_line_head(ctx);
ctx->timing_mode = *(p++) >> 6;
// subtitle data should be TMD_FREE (ARIB TR-B15).
if (ctx->timing_mode != ISDBSUB_TMD_FREE) {
ctx->start.hour = (p[0] >> 4) * 10 + (p[0] & 0x0f);
ctx->start.min = (p[1] >> 4) * 10 + (p[1] & 0x0f);
ctx->start.sec = (p[2] >> 4) * 10 + (p[2] & 0x0f);
ctx->start.ms = (p[3] >> 4) * 100 + (p[3] & 0x0f) * 10 + (p[4] >> 4);
if (ctx->timing_mode == ISDBSUB_TMD_OFFSET) {
ctx->start.ms += ctx->offset.ms;
if (ctx->start.ms >= 1000) {
ctx->start.ms -= 1000;
ctx->start.sec ++;
}
ctx->start.sec += ctx->offset.sec;
if (ctx->start.sec >= 60) {
ctx->start.sec -= 60;
ctx->start.min ++;
}
ctx->start.min += ctx->offset.min;
if (ctx->start.min >= 60) {
ctx->start.min -= 60;
ctx->start.hour ++;
}
ctx->start.hour += ctx->offset.hour;
}
p += 5;
}
du_loop_len = AV_RB24(p);
p += 3;
if (p + du_loop_len > buf + buf_size)
du_loop_len = buf + buf_size - p;
while (du_loop_len >= 5) {
int du_size = AV_RB24(p + 2);
if (p[0] != ISDBSUB_UNIT_SEP || du_loop_len < 5 + du_size)
break;
du_loop_len -= (5 + du_size);
// ignore DRCS. only txt data units.
if (p[1] == ISDBSUB_DU_TYPE_TXT) {
memdump(MSGL_DBG2, p + 5, du_size);
process_txt_du(ctx, p + 5, du_size);
}
p += 5 + du_size;
}
append_event(ctx);
}
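// One-time initialization: derive a second, presumably half-transparent,
// bank of CLUT entries from the opaque defaults, fetch the CRC table and
// open the EUC-JISX0213 -> UTF-8 iconv descriptor.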
static int do_init(ISDBSubContext *ctx)
{
int i;
for (i = 65; i < 73; i++)
Default_clut[i] = Default_clut[i - 65] & ~RGBA(0,0,0,128);
for (i = 73; i < 128; i++)
Default_clut[i] = Default_clut[i - 64] & ~RGBA(0,0,0,128);
Crc_table = av_crc_get_table(AV_CRC_16_CCITT);
ctx->last_mngmnt_id = ISDBSUB_NO_DGID;
ctx->last_mngmnt_pts = AV_NOPTS_VALUE;
ctx->iconv = iconv_open("UTF-8", "EUC-JISX0213");
if (ctx->iconv == (iconv_t) -1)
return -1;
return 0;
}
void isdbsub_reset(struct sh_sub *sh)
{
ISDBSubContext *ctx = sh->context;
if (!ctx)
return;
if (ctx->iconv != (iconv_t) -1)
iconv_close(ctx->iconv);
free(ctx->text.buf);
free(ctx->script_info);
free(ctx->events);
free(ctx);
sh->context = NULL;
}
/**
* Decode a ISDB subtitle packet.
* \return < 0 on error, 'a' if further processing is needed
*/
int isdbsub_decode(struct sh_sub *sh, uint8_t **data, int *size,
double *pts, double *endpts)
{
ISDBSubContext *ctx = sh->context;
const uint8_t *buf = *data;
int buf_size = *size;
int64_t orig_pts;
const uint8_t *p, *p_end;
mp_msg(MSGT_DECSUB, MSGL_V, "ISDB sub packet:\n");
memdump(MSGL_DBG2, buf, buf_size);
ass_font_scale = 1.;
if (!ctx) {
ctx = sh->context = calloc(1, sizeof(ISDBSubContext));
if (!sh->context || do_init(sh->context)) {
mp_msg(MSGT_DECSUB, MSGL_WARN,
"Could not initialize codec context.\n");
return -1;
}
}
if (buf_size <= 10 || buf[0] != ISDBSUB_DATA_ID || buf[1] != 0xff) {
mp_msg(MSGT_DECSUB, MSGL_INFO, "incomplete or broken packet\n");
return -1;
}
/* set default output */
*data = NULL;
*size = 0;
orig_pts = (*pts != MP_NOPTS_VALUE) ? *pts * 1000 : AV_NOPTS_VALUE;
ctx->pts = orig_pts;
ctx->duration = 5 * 1000; //temporary.
ctx->lang_tag = sh->cur_lang_tag;
if (orig_pts != AV_NOPTS_VALUE && ctx->last_mngmnt_pts != AV_NOPTS_VALUE) {
int64_t t = orig_pts;
char t1[16], t2[16];
// check pts timeout, but taking PTS wrap-around into account.
if (t < ctx->last_mngmnt_pts &&
(ctx->last_mngmnt_pts - t) > (MPEGTS_MAX_PTS >> 1)) {
mp_msg(MSGT_DECSUB, MSGL_INFO, "PTS wrap-around %s -> %s\n",
pts_to_str(ctx->last_mngmnt_pts, t1), pts_to_str(t, t2));
t += MPEGTS_MAX_PTS;
}
if (t < ctx->last_mngmnt_pts ||
t - ctx->last_mngmnt_pts > ISDBSUB_MGMNT_TIMEOUT) {
mp_msg(MSGT_DECSUB, MSGL_INFO,
"Subtitle Management DataGroup time-out. %s -> %s\n",
pts_to_str(ctx->last_mngmnt_pts, t1), pts_to_str(t, t2));
ctx->last_mngmnt_pts = AV_NOPTS_VALUE;
}
}
p = buf + 3 + (buf[2] & 0x0f);
p_end = buf + buf_size;
while (p_end - p >= 7) {
int dg_id = p[0] >> 2;
int dg_size = AV_RB16(p + 3);
uint32_t crc = 0;
if (p + 5 + dg_size + 2 > p_end ||
(crc = av_crc(Crc_table, 0, p, 5 + dg_size + 2)) != 0) {
mp_msg(MSGT_DECSUB, MSGL_INFO, "incomplete or broken packet. "
"ofs:%ld l:%d crc:0x%04hx\n", p - buf, 5 + dg_size + 2, crc);
return -1;
}
p += 5;
if ((dg_id & 0x0f) == 0) { // subtile management data group
char t1[16];
if (dg_id != ctx->last_mngmnt_id
|| ctx->last_mngmnt_pts == AV_NOPTS_VALUE)
process_mngmnt_dg(ctx, p, dg_size);
else
mp_msg(MSGT_DECSUB, MSGL_DBG2,
"Skip the same subtitle management data group.\n");
ctx->last_mngmnt_id = dg_id;
ctx->last_mngmnt_pts = orig_pts;
mp_msg(MSGT_DECSUB, MSGL_DBG2,
"last_mngmnt_pts set to %s\n", pts_to_str(orig_pts, t1));
} else if ((dg_id & 0x0f) == ctx->lang_tag + 1) { // subtile data group
if (ctx->last_mngmnt_id == ISDBSUB_NO_DGID)
mp_msg(MSGT_DECSUB, MSGL_V, "no management data group received yet.\n");
else if ((dg_id & 0xf0) == ctx->last_mngmnt_id)
process_sub_dg(ctx, p, dg_size);
} else
mp_msg(MSGT_DECSUB, MSGL_DBG2,
"Subtitle data group id 0x%02x, length %d\n", dg_id, dg_size);
p += dg_size + 2;
}
if (orig_pts != AV_NOPTS_VALUE && ctx->last_mngmnt_pts == AV_NOPTS_VALUE) {
ctx->last_mngmnt_pts = orig_pts;
mp_msg(MSGT_DECSUB, MSGL_V,
"replaced empty last_mngmnt_pts with the next received PTS.\n");
}
do_output(ctx, data, size);
if (endpts && orig_pts != AV_NOPTS_VALUE)
*endpts = (ctx->pts + ctx->duration) / 1000.0;
return *data ? 'a' : 0;
}
| 0p1pp1/mplayer | sub/isdbsubdec.c | C | gpl-2.0 | 81,372 |
/*
* BSD compression module
*
* Patched version for ISDN syncPPP written 1997/1998 by Michael Hipp
* The whole module is now SKB based.
*
*/
/*
* Update: The Berkeley copyright was changed, and the change
* is retroactive to all "true" BSD software (ie everything
* from UCB as opposed to other peoples code that just carried
* the same license). The new copyright doesn't clash with the
* GPL, so the module-only restriction has been removed..
*/
/*
* Original copyright notice:
*
* Copyright (c) 1985, 1986 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* James A. Woods, derived from original work by Spencer Thomas
* and Joseph Orost.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h> /* used in new tty drivers */
#include <linux/signal.h> /* used in new tty drivers */
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include <asm/types.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/ioctl.h>
#include <linux/vmalloc.h>
#include <linux/ppp_defs.h>
#include <linux/isdn.h>
#include <linux/isdn_ppp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_arp.h>
#include <linux/ppp-comp.h>
#include "isdn_ppp.h"
MODULE_DESCRIPTION("ISDN4Linux: BSD Compression for PPP over ISDN");
MODULE_LICENSE("Dual BSD/GPL");
#define BSD_VERSION(x) ((x) >> 5)
#define BSD_NBITS(x) ((x) & 0x1F)
#define BSD_CURRENT_VERSION 1
#define DEBUG 1
/*
* A dictionary for doing BSD compress.
*/
struct bsd_dict {
u32 fcode;
u16 codem1; /* output of hash table -1 */
u16 cptr; /* map code to hash table entry */
};
struct bsd_db {
int totlen; /* length of this structure */
unsigned int hsize; /* size of the hash table */
unsigned char hshift; /* used in hash function */
unsigned char n_bits; /* current bits/code */
unsigned char maxbits; /* maximum bits/code */
unsigned char debug; /* non-zero if debug desired */
unsigned char unit; /* ppp unit number */
u16 seqno; /* sequence # of next packet */
	unsigned int mru;		/* size of receive (decompress) buffer */
unsigned int maxmaxcode; /* largest valid code */
unsigned int max_ent; /* largest code in use */
unsigned int in_count; /* uncompressed bytes, aged */
unsigned int bytes_out; /* compressed bytes, aged */
unsigned int ratio; /* recent compression ratio */
unsigned int checkpoint; /* when to next check the ratio */
unsigned int clear_count; /* times dictionary cleared */
unsigned int incomp_count; /* incompressible packets */
unsigned int incomp_bytes; /* incompressible bytes */
unsigned int uncomp_count; /* uncompressed packets */
unsigned int uncomp_bytes; /* uncompressed bytes */
unsigned int comp_count; /* compressed packets */
unsigned int comp_bytes; /* compressed bytes */
unsigned short *lens; /* array of lengths of codes */
struct bsd_dict *dict; /* dictionary */
int xmit;
};
#define BSD_OVHD 2 /* BSD compress overhead/packet */
#define MIN_BSD_BITS 9
#define BSD_INIT_BITS MIN_BSD_BITS
#define MAX_BSD_BITS 15
/*
* the next two codes should not be changed lightly, as they must not
* lie within the contiguous general code space.
*/
#define CLEAR 256 /* table clear output code */
#define FIRST 257 /* first free entry */
#define LAST 255
#define MAXCODE(b) ((1 << (b)) - 1)
#define BADCODEM1 MAXCODE(MAX_BSD_BITS)
#define BSD_HASH(prefix, suffix, hshift) ((((unsigned long)(suffix)) << (hshift)) \
^ (unsigned long)(prefix))
#define BSD_KEY(prefix, suffix) ((((unsigned long)(suffix)) << 16) \
+ (unsigned long)(prefix))
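/*
 * A dictionary entry's fcode packs the appended byte (suffix) into the
 * upper bits and the prefix code into the low 16 bits, so a BSD_KEY value
 * can be compared directly against dict[].fcode. BSD_HASH folds the same
 * pair into a start index for the open-addressed hash table.
 */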
#define CHECK_GAP 10000 /* Ratio check interval */
#define RATIO_SCALE_LOG 8
#define RATIO_SCALE (1 << RATIO_SCALE_LOG)
#define RATIO_MAX (0x7fffffff >> RATIO_SCALE_LOG)
/*
* clear the dictionary
*/
static void bsd_clear(struct bsd_db *db)
{
db->clear_count++;
db->max_ent = FIRST - 1;
db->n_bits = BSD_INIT_BITS;
db->bytes_out = 0;
db->in_count = 0;
db->incomp_count = 0;
db->ratio = 0;
db->checkpoint = CHECK_GAP;
}
/*
* If the dictionary is full, then see if it is time to reset it.
*
* Compute the compression ratio using fixed-point arithmetic
* with 8 fractional bits.
*
* Since we have an infinite stream instead of a single file,
* watch only the local compression ratio.
*
* Since both peers must reset the dictionary at the same time even in
* the absence of CLEAR codes (while packets are incompressible), they
* must compute the same ratio.
*/
static int bsd_check(struct bsd_db *db) /* 1=output CLEAR */
{
unsigned int new_ratio;
if (db->in_count >= db->checkpoint)
{
/* age the ratio by limiting the size of the counts */
if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX)
{
db->in_count -= (db->in_count >> 2);
db->bytes_out -= (db->bytes_out >> 2);
}
db->checkpoint = db->in_count + CHECK_GAP;
if (db->max_ent >= db->maxmaxcode)
{
/* Reset the dictionary only if the ratio is worse,
* or if it looks as if it has been poisoned
* by incompressible data.
*
* This does not overflow, because
* db->in_count <= RATIO_MAX.
*/
new_ratio = db->in_count << RATIO_SCALE_LOG;
if (db->bytes_out != 0)
{
new_ratio /= db->bytes_out;
}
if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE)
{
bsd_clear(db);
return 1;
}
db->ratio = new_ratio;
}
}
return 0;
}
/*
* Return statistics.
*/
static void bsd_stats(void *state, struct compstat *stats)
{
struct bsd_db *db = (struct bsd_db *) state;
stats->unc_bytes = db->uncomp_bytes;
stats->unc_packets = db->uncomp_count;
stats->comp_bytes = db->comp_bytes;
stats->comp_packets = db->comp_count;
stats->inc_bytes = db->incomp_bytes;
stats->inc_packets = db->incomp_count;
stats->in_count = db->in_count;
stats->bytes_out = db->bytes_out;
}
/*
* Reset state, as on a CCP ResetReq.
*/
static void bsd_reset(void *state, unsigned char code, unsigned char id,
unsigned char *data, unsigned len,
struct isdn_ppp_resetparams *rsparm)
{
struct bsd_db *db = (struct bsd_db *) state;
bsd_clear(db);
db->seqno = 0;
db->clear_count = 0;
}
/*
* Release the compression structure
*/
static void bsd_free(void *state)
{
struct bsd_db *db = (struct bsd_db *) state;
if (db) {
/*
* Release the dictionary
*/
vfree(db->dict);
db->dict = NULL;
/*
* Release the string buffer
*/
vfree(db->lens);
db->lens = NULL;
/*
* Finally release the structure itself.
*/
kfree(db);
}
}
/*
* Allocate space for a (de) compressor.
*/
static void *bsd_alloc(struct isdn_ppp_comp_data *data)
{
int bits;
unsigned int hsize, hshift, maxmaxcode;
struct bsd_db *db;
int decomp;
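	/*
	 * Hash table size and hash shift for each code size, indexed by
	 * (maxbits - 9); code sizes of 9..12 bits share the smallest table.
	 */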
static unsigned int htab[][2] = {
{ 5003 , 4 } , { 5003 , 4 } , { 5003 , 4 } , { 5003 , 4 } ,
{ 9001 , 5 } , { 18013 , 6 } , { 35023 , 7 } , { 69001 , 8 }
};
if (data->optlen != 1 || data->num != CI_BSD_COMPRESS
|| BSD_VERSION(data->options[0]) != BSD_CURRENT_VERSION)
return NULL;
bits = BSD_NBITS(data->options[0]);
if (bits < 9 || bits > 15)
return NULL;
hsize = htab[bits - 9][0];
hshift = htab[bits - 9][1];
/*
* Allocate the main control structure for this instance.
*/
maxmaxcode = MAXCODE(bits);
db = kzalloc(sizeof(struct bsd_db), GFP_KERNEL);
if (!db)
return NULL;
db->xmit = data->flags & IPPP_COMP_FLAG_XMIT;
decomp = db->xmit ? 0 : 1;
/*
* Allocate space for the dictionary. This may be more than one page in
* length.
*/
db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
if (!db->dict) {
bsd_free(db);
return NULL;
}
/*
* If this is the compression buffer then there is no length data.
* For decompression, the length information is needed as well.
*/
if (!decomp)
db->lens = NULL;
else {
db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
if (!db->lens) {
bsd_free(db);
return (NULL);
}
}
/*
* Initialize the data information for the compression code
*/
db->totlen = sizeof(struct bsd_db) + (sizeof(struct bsd_dict) * hsize);
db->hsize = hsize;
db->hshift = hshift;
db->maxmaxcode = maxmaxcode;
db->maxbits = bits;
return (void *)db;
}
/*
* Initialize the database.
*/
static int bsd_init(void *state, struct isdn_ppp_comp_data *data, int unit, int debug)
{
struct bsd_db *db = state;
int indx;
int decomp;
if (!state || !data) {
printk(KERN_ERR "isdn_bsd_init: [%d] ERR, state %lx data %lx\n", unit, (long)state, (long)data);
return 0;
}
decomp = db->xmit ? 0 : 1;
if (data->optlen != 1 || data->num != CI_BSD_COMPRESS
|| (BSD_VERSION(data->options[0]) != BSD_CURRENT_VERSION)
|| (BSD_NBITS(data->options[0]) != db->maxbits)
|| (decomp && db->lens == NULL)) {
printk(KERN_ERR "isdn_bsd: %d %d %d %d %lx\n", data->optlen, data->num, data->options[0], decomp, (unsigned long)db->lens);
return 0;
}
if (decomp)
for (indx = LAST; indx >= 0; indx--)
db->lens[indx] = 1;
indx = db->hsize;
while (indx-- != 0) {
db->dict[indx].codem1 = BADCODEM1;
db->dict[indx].cptr = 0;
}
db->unit = unit;
db->mru = 0;
db->debug = 1;
bsd_reset(db, 0, 0, NULL, 0, NULL);
return 1;
}
/*
* Obtain pointers to the various structures in the compression tables
*/
#define dict_ptrx(p, idx) &(p->dict[idx])
#define lens_ptrx(p, idx) &(p->lens[idx])
#ifdef DEBUG
static unsigned short *lens_ptr(struct bsd_db *db, int idx)
{
if ((unsigned int) idx > (unsigned int) db->maxmaxcode) {
printk(KERN_DEBUG "<9>ppp: lens_ptr(%d) > max\n", idx);
idx = 0;
}
return lens_ptrx(db, idx);
}
static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx)
{
if ((unsigned int) idx >= (unsigned int) db->hsize) {
printk(KERN_DEBUG "<9>ppp: dict_ptr(%d) > max\n", idx);
idx = 0;
}
return dict_ptrx(db, idx);
}
#else
#define lens_ptr(db, idx) lens_ptrx(db, idx)
#define dict_ptr(db, idx) dict_ptrx(db, idx)
#endif
/*
* compress a packet
*/
static int bsd_compress(void *state, struct sk_buff *skb_in, struct sk_buff *skb_out, int proto)
{
struct bsd_db *db;
int hshift;
unsigned int max_ent;
unsigned int n_bits;
unsigned int bitno;
unsigned long accm;
int ent;
unsigned long fcode;
struct bsd_dict *dictp;
unsigned char c;
int hval, disp, ilen, mxcode;
unsigned char *rptr = skb_in->data;
int isize = skb_in->len;
#define OUTPUT(ent) \
{ \
bitno -= n_bits; \
accm |= ((ent) << bitno); \
do { \
if (skb_out && skb_tailroom(skb_out) > 0) \
*(skb_put(skb_out, 1)) = (unsigned char)(accm >> 24); \
accm <<= 8; \
bitno += 8; \
} while (bitno <= 24); \
}
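	/*
	 * OUTPUT() packs an n_bits wide code MSB-first into the 32-bit
	 * accumulator and flushes whole bytes from its top as soon as at
	 * least eight bits are buffered (i.e. while bitno <= 24).
	 */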
/*
* If the protocol is not in the range we're interested in,
* just return without compressing the packet. If it is,
* the protocol becomes the first byte to compress.
*/
printk(KERN_DEBUG "bsd_compress called with %x\n", proto);
ent = proto;
if (proto < 0x21 || proto > 0xf9 || !(proto & 0x1))
return 0;
db = (struct bsd_db *) state;
hshift = db->hshift;
max_ent = db->max_ent;
n_bits = db->n_bits;
bitno = 32;
accm = 0;
mxcode = MAXCODE(n_bits);
/* This is the PPP header information */
if (skb_out && skb_tailroom(skb_out) >= 2) {
char *v = skb_put(skb_out, 2);
		/* we only push our own data on the header;
		   AC, PC and proto are pushed by the caller */
v[0] = db->seqno >> 8;
v[1] = db->seqno;
}
	ilen = ++isize; /* This is off by one, but that is what is in the draft! */
while (--ilen > 0) {
c = *rptr++;
fcode = BSD_KEY(ent, c);
hval = BSD_HASH(ent, c, hshift);
dictp = dict_ptr(db, hval);
/* Validate and then check the entry. */
if (dictp->codem1 >= max_ent)
goto nomatch;
if (dictp->fcode == fcode) {
ent = dictp->codem1 + 1;
continue; /* found (prefix,suffix) */
}
/* continue probing until a match or invalid entry */
disp = (hval == 0) ? 1 : hval;
do {
hval += disp;
if (hval >= db->hsize)
hval -= db->hsize;
dictp = dict_ptr(db, hval);
if (dictp->codem1 >= max_ent)
goto nomatch;
} while (dictp->fcode != fcode);
ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */
continue;
nomatch:
OUTPUT(ent); /* output the prefix */
/* code -> hashtable */
if (max_ent < db->maxmaxcode) {
struct bsd_dict *dictp2;
struct bsd_dict *dictp3;
int indx;
/* expand code size if needed */
if (max_ent >= mxcode) {
db->n_bits = ++n_bits;
mxcode = MAXCODE(n_bits);
}
/*
* Invalidate old hash table entry using
* this code, and then take it over.
*/
dictp2 = dict_ptr(db, max_ent + 1);
indx = dictp2->cptr;
dictp3 = dict_ptr(db, indx);
if (dictp3->codem1 == max_ent)
dictp3->codem1 = BADCODEM1;
dictp2->cptr = hval;
dictp->codem1 = max_ent;
dictp->fcode = fcode;
db->max_ent = ++max_ent;
if (db->lens) {
unsigned short *len1 = lens_ptr(db, max_ent);
unsigned short *len2 = lens_ptr(db, ent);
*len1 = *len2 + 1;
}
}
ent = c;
}
OUTPUT(ent); /* output the last code */
if (skb_out)
db->bytes_out += skb_out->len; /* Do not count bytes from here */
db->uncomp_bytes += isize;
db->in_count += isize;
++db->uncomp_count;
++db->seqno;
if (bitno < 32)
++db->bytes_out; /* must be set before calling bsd_check */
/*
* Generate the clear command if needed
*/
if (bsd_check(db))
OUTPUT(CLEAR);
/*
* Pad dribble bits of last code with ones.
* Do not emit a completely useless byte of ones.
*/
if (bitno < 32 && skb_out && skb_tailroom(skb_out) > 0)
*(skb_put(skb_out, 1)) = (unsigned char)((accm | (0xff << (bitno - 8))) >> 24);
/*
* Increase code size if we would have without the packet
* boundary because the decompressor will do so.
*/
if (max_ent >= mxcode && max_ent < db->maxmaxcode)
db->n_bits++;
/* If output length is too large then this is an incompressible frame. */
if (!skb_out || (skb_out && skb_out->len >= skb_in->len)) {
++db->incomp_count;
db->incomp_bytes += isize;
return 0;
}
/* Count the number of compressed frames */
++db->comp_count;
db->comp_bytes += skb_out->len;
return skb_out->len;
#undef OUTPUT
}
/*
* Update the "BSD Compress" dictionary on the receiver for
* incompressible data by pretending to compress the incoming data.
*/
static void bsd_incomp(void *state, struct sk_buff *skb_in, int proto)
{
bsd_compress(state, skb_in, NULL, proto);
}
/*
* Decompress "BSD Compress".
*/
static int bsd_decompress(void *state, struct sk_buff *skb_in, struct sk_buff *skb_out,
struct isdn_ppp_resetparams *rsparm)
{
struct bsd_db *db;
unsigned int max_ent;
unsigned long accm;
unsigned int bitno; /* 1st valid bit in accm */
unsigned int n_bits;
unsigned int tgtbitno; /* bitno when we have a code */
struct bsd_dict *dictp;
int seq;
unsigned int incode;
unsigned int oldcode;
unsigned int finchar;
unsigned char *p, *ibuf;
int ilen;
int codelen;
int extra;
db = (struct bsd_db *) state;
max_ent = db->max_ent;
accm = 0;
bitno = 32; /* 1st valid bit in accm */
n_bits = db->n_bits;
tgtbitno = 32 - n_bits; /* bitno when we have a code */
printk(KERN_DEBUG "bsd_decompress called\n");
if (!skb_in || !skb_out) {
printk(KERN_ERR "bsd_decompress called with NULL parameter\n");
return DECOMP_ERROR;
}
/*
* Get the sequence number.
*/
if ((p = skb_pull(skb_in, 2)) == NULL) {
return DECOMP_ERROR;
}
p -= 2;
seq = (p[0] << 8) + p[1];
ilen = skb_in->len;
ibuf = skb_in->data;
/*
* Check the sequence number and give up if it differs from
* the value we're expecting.
*/
if (seq != db->seqno) {
if (db->debug) {
printk(KERN_DEBUG "bsd_decomp%d: bad sequence # %d, expected %d\n",
db->unit, seq, db->seqno - 1);
}
return DECOMP_ERROR;
}
++db->seqno;
db->bytes_out += ilen;
if (skb_tailroom(skb_out) > 0)
*(skb_put(skb_out, 1)) = 0;
else
return DECOMP_ERR_NOMEM;
oldcode = CLEAR;
/*
* Keep the checkpoint correctly so that incompressible packets
* clear the dictionary at the proper times.
*/
for (;;) {
if (ilen-- <= 0) {
db->in_count += (skb_out->len - 1); /* don't count the header */
break;
}
/*
* Accumulate bytes until we have a complete code.
* Then get the next code, relying on the 32-bit,
* unsigned accm to mask the result.
*/
bitno -= 8;
accm |= *ibuf++ << bitno;
if (tgtbitno < bitno)
continue;
incode = accm >> tgtbitno;
accm <<= n_bits;
bitno += n_bits;
/*
* The dictionary must only be cleared at the end of a packet.
*/
if (incode == CLEAR) {
if (ilen > 0) {
if (db->debug)
printk(KERN_DEBUG "bsd_decomp%d: bad CLEAR\n", db->unit);
return DECOMP_FATALERROR; /* probably a bug */
}
bsd_clear(db);
break;
}
if ((incode > max_ent + 2) || (incode > db->maxmaxcode)
|| (incode > max_ent && oldcode == CLEAR)) {
if (db->debug) {
printk(KERN_DEBUG "bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
db->unit, incode, oldcode);
printk(KERN_DEBUG "max_ent=0x%x skb->Len=%d seqno=%d\n",
max_ent, skb_out->len, db->seqno);
}
return DECOMP_FATALERROR; /* probably a bug */
}
/* Special case for KwKwK string. */
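		/*
		 * An incoming code of max_ent + 1 names the entry the
		 * compressor is creating from the previous code: its string
		 * is oldcode's string followed by that string's first
		 * character, so decode oldcode now and append that first
		 * character once more at the end (the `extra' byte).
		 */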
if (incode > max_ent) {
finchar = oldcode;
extra = 1;
} else {
finchar = incode;
extra = 0;
}
codelen = *(lens_ptr(db, finchar));
if (skb_tailroom(skb_out) < codelen + extra) {
if (db->debug) {
printk(KERN_DEBUG "bsd_decomp%d: ran out of mru\n", db->unit);
#ifdef DEBUG
printk(KERN_DEBUG " len=%d, finchar=0x%x, codelen=%d,skblen=%d\n",
ilen, finchar, codelen, skb_out->len);
#endif
}
return DECOMP_FATALERROR;
}
/*
* Decode this code and install it in the decompressed buffer.
*/
p = skb_put(skb_out, codelen);
p += codelen;
while (finchar > LAST) {
struct bsd_dict *dictp2 = dict_ptr(db, finchar);
dictp = dict_ptr(db, dictp2->cptr);
#ifdef DEBUG
if (--codelen <= 0 || dictp->codem1 != finchar - 1) {
if (codelen <= 0) {
printk(KERN_ERR "bsd_decomp%d: fell off end of chain ", db->unit);
printk(KERN_ERR "0x%x at 0x%x by 0x%x, max_ent=0x%x\n", incode, finchar, dictp2->cptr, max_ent);
} else {
if (dictp->codem1 != finchar - 1) {
printk(KERN_ERR "bsd_decomp%d: bad code chain 0x%x finchar=0x%x ", db->unit, incode, finchar);
printk(KERN_ERR "oldcode=0x%x cptr=0x%x codem1=0x%x\n", oldcode, dictp2->cptr, dictp->codem1);
}
}
return DECOMP_FATALERROR;
}
#endif
{
u32 fcode = dictp->fcode;
*--p = (fcode >> 16) & 0xff;
finchar = fcode & 0xffff;
}
}
*--p = finchar;
#ifdef DEBUG
if (--codelen != 0)
printk(KERN_ERR "bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n", db->unit, codelen, incode, max_ent);
#endif
if (extra) /* the KwKwK case again */
*(skb_put(skb_out, 1)) = finchar;
/*
* If not first code in a packet, and
* if not out of code space, then allocate a new code.
*
* Keep the hash table correct so it can be used
* with uncompressed packets.
*/
if (oldcode != CLEAR && max_ent < db->maxmaxcode) {
struct bsd_dict *dictp2, *dictp3;
u16 *lens1, *lens2;
unsigned long fcode;
int hval, disp, indx;
fcode = BSD_KEY(oldcode, finchar);
hval = BSD_HASH(oldcode, finchar, db->hshift);
dictp = dict_ptr(db, hval);
/* look for a free hash table entry */
if (dictp->codem1 < max_ent) {
disp = (hval == 0) ? 1 : hval;
do {
hval += disp;
if (hval >= db->hsize)
hval -= db->hsize;
dictp = dict_ptr(db, hval);
} while (dictp->codem1 < max_ent);
}
/*
* Invalidate previous hash table entry
* assigned this code, and then take it over
*/
dictp2 = dict_ptr(db, max_ent + 1);
indx = dictp2->cptr;
dictp3 = dict_ptr(db, indx);
if (dictp3->codem1 == max_ent)
dictp3->codem1 = BADCODEM1;
dictp2->cptr = hval;
dictp->codem1 = max_ent;
dictp->fcode = fcode;
db->max_ent = ++max_ent;
/* Update the length of this string. */
lens1 = lens_ptr(db, max_ent);
lens2 = lens_ptr(db, oldcode);
*lens1 = *lens2 + 1;
/* Expand code size if needed. */
if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode) {
db->n_bits = ++n_bits;
				tgtbitno = 32 - n_bits;
}
}
oldcode = incode;
}
++db->comp_count;
++db->uncomp_count;
db->comp_bytes += skb_in->len - BSD_OVHD;
db->uncomp_bytes += skb_out->len;
if (bsd_check(db)) {
if (db->debug)
printk(KERN_DEBUG "bsd_decomp%d: peer should have cleared dictionary on %d\n",
db->unit, db->seqno - 1);
}
return skb_out->len;
}
/*************************************************************
* Table of addresses for the BSD compression module
*************************************************************/
static struct isdn_ppp_compressor ippp_bsd_compress = {
.owner = THIS_MODULE,
.num = CI_BSD_COMPRESS,
.alloc = bsd_alloc,
.free = bsd_free,
.init = bsd_init,
.reset = bsd_reset,
.compress = bsd_compress,
.decompress = bsd_decompress,
.incomp = bsd_incomp,
.stat = bsd_stats,
};
/*************************************************************
* Module support routines
*************************************************************/
static int __init isdn_bsdcomp_init(void)
{
int answer = isdn_ppp_register_compressor(&ippp_bsd_compress);
if (answer == 0)
printk(KERN_INFO "PPP BSD Compression module registered\n");
return answer;
}
static void __exit isdn_bsdcomp_exit(void)
{
isdn_ppp_unregister_compressor(&ippp_bsd_compress);
}
module_init(isdn_bsdcomp_init);
module_exit(isdn_bsdcomp_exit);
| Jackeagle/android_kernel_sony_c2305 | drivers/isdn/i4l/isdn_bsdcomp.c | C | gpl-2.0 | 24,813 |
/*
* resolve.c - resolve names and tags into specific devices
*
* Copyright (C) 2001, 2003 Theodore Ts'o.
* Copyright (C) 2001 Andreas Dilger
*
* %Begin-Header%
* This file may be redistributed under the terms of the
* GNU Lesser General Public License.
* %End-Header%
*/
#include <stdio.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "blkidP.h"
#include "probe.h"
/*
* Find a tagname (e.g. LABEL or UUID) on a specific device.
*/
char *blkid_get_tag_value(blkid_cache cache, const char *tagname,
const char *devname)
{
blkid_tag found;
blkid_dev dev;
blkid_cache c = cache;
char *ret = NULL;
DBG(DEBUG_RESOLVE, printf("looking for %s on %s\n", tagname, devname));
if (!devname)
return NULL;
if (!cache) {
if (blkid_get_cache(&c, NULL) < 0)
return NULL;
}
if ((dev = blkid_get_dev(c, devname, BLKID_DEV_NORMAL)) &&
(found = blkid_find_tag_dev(dev, tagname)))
ret = blkid_strdup(found->bit_val);
if (!cache)
blkid_put_cache(c);
return ret;
}
/*
* Locate a device name from a token (NAME=value string), or (name, value)
* pair. In the case of a token, value is ignored. If the "token" is not
* of the form "NAME=value" and there is no value given, then it is assumed
* to be the actual devname and a copy is returned.
*/
char *blkid_get_devname(blkid_cache cache, const char *token,
const char *value)
{
blkid_dev dev;
blkid_cache c = cache;
char *t = 0, *v = 0;
char *ret = NULL;
if (!token)
return NULL;
if (!cache) {
if (blkid_get_cache(&c, NULL) < 0)
return NULL;
}
DBG(DEBUG_RESOLVE,
printf("looking for %s%s%s %s\n", token, value ? "=" : "",
value ? value : "", cache ? "in cache" : "from disk"));
if (!value) {
if (!strchr(token, '='))
return blkid_strdup(token);
blkid_parse_tag_string(token, &t, &v);
if (!t || !v)
goto errout;
token = t;
value = v;
}
dev = blkid_find_dev_with_tag(c, token, value);
if (!dev)
goto errout;
ret = blkid_strdup(blkid_dev_devname(dev));
errout:
if (t)
free(t);
if (v)
free(v);
if (!cache) {
blkid_put_cache(c);
}
return (ret);
}
#ifdef TEST_PROGRAM
int main(int argc, char **argv)
{
char *value;
blkid_cache cache;
blkid_debug_mask = DEBUG_ALL;
if (argc != 2 && argc != 3) {
fprintf(stderr, "Usage:\t%s tagname=value\n"
"\t%s tagname devname\n"
"Find which device holds a given token or\n"
"Find what the value of a tag is in a device\n",
argv[0], argv[0]);
exit(1);
}
if (blkid_get_cache(&cache, bb_dev_null) < 0) {
fprintf(stderr, "Couldn't get blkid cache\n");
exit(1);
}
if (argv[2]) {
value = blkid_get_tag_value(cache, argv[1], argv[2]);
printf("%s has tag %s=%s\n", argv[2], argv[1],
value ? value : "<missing>");
} else {
value = blkid_get_devname(cache, argv[1], NULL);
printf("%s has tag %s\n", value ? value : "<none>", argv[1]);
}
blkid_put_cache(cache);
return value ? 0 : 1;
}
#endif
| jgunthorpe/busybox | e2fsprogs/blkid/resolve.c | C | gpl-2.0 | 3,032 |
/*
* libata-scsi.c - helper library for ATA
*
* Maintained by: Jeff Garzik <jgarzik@pobox.com>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* Hardware documentation available from
* - http://www.t10.org/
* - http://www.t13.org/
*
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <asm/unaligned.h>
#include "libata.h"
#define SECTOR_SIZE 512
#define ATA_SCSI_RBUF_SIZE 4096
static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
const struct scsi_device *scsidev);
static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
const struct scsi_device *scsidev);
static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, unsigned int lun);
#define RW_RECOVERY_MPAGE 0x1
#define RW_RECOVERY_MPAGE_LEN 12
#define CACHE_MPAGE 0x8
#define CACHE_MPAGE_LEN 20
#define CONTROL_MPAGE 0xa
#define CONTROL_MPAGE_LEN 12
#define ALL_MPAGES 0x3f
#define ALL_SUB_MPAGES 0xff
static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
RW_RECOVERY_MPAGE,
RW_RECOVERY_MPAGE_LEN - 2,
(1 << 7), /* AWRE */
0, /* read retry count */
0, 0, 0, 0,
0, /* write retry count */
0, 0, 0
};
static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
CACHE_MPAGE,
CACHE_MPAGE_LEN - 2,
0, /* contains WCE, needs to be 0 for logic */
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, /* contains DRA, needs to be 0 for logic */
0, 0, 0, 0, 0, 0, 0
};
static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
CONTROL_MPAGE,
CONTROL_MPAGE_LEN - 2,
2, /* DSENSE=0, GLTSD=1 */
0, /* [QAM+QERR may be 1, see 05-359r1] */
0, 0, 0, 0, 0xff, 0xff,
0, 30 /* extended self test time, see 05-359r1 */
};
/*
* libata transport template. libata doesn't do real transport stuff.
* It just needs the eh_timed_out hook.
*/
static struct scsi_transport_template ata_scsi_transport_template = {
.eh_strategy_handler = ata_scsi_error,
.eh_timed_out = ata_scsi_timed_out,
.user_scan = ata_scsi_user_scan,
};
static const struct {
enum link_pm value;
const char *name;
} link_pm_policy[] = {
{ NOT_AVAILABLE, "max_performance" },
{ MIN_POWER, "min_power" },
{ MAX_PERFORMANCE, "max_performance" },
{ MEDIUM_POWER, "medium_power" },
};
static const char *ata_scsi_lpm_get(enum link_pm policy)
{
int i;
for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
if (link_pm_policy[i].value == policy)
return link_pm_policy[i].name;
return NULL;
}
static ssize_t ata_scsi_lpm_put(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
enum link_pm policy = 0;
int i;
/*
* we are skipping array location 0 on purpose - this
* is because a value of NOT_AVAILABLE is displayed
* to the user as max_performance, but when the user
* writes "max_performance", they actually want the
* value to match MAX_PERFORMANCE.
*/
for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
const int len = strlen(link_pm_policy[i].name);
if (strncmp(link_pm_policy[i].name, buf, len) == 0) {
policy = link_pm_policy[i].value;
break;
}
}
if (!policy)
return -EINVAL;
ata_lpm_schedule(ap, policy);
return count;
}
static ssize_t
ata_scsi_lpm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
const char *policy =
ata_scsi_lpm_get(ap->pm_policy);
if (!policy)
return -EINVAL;
return snprintf(buf, 23, "%s\n", policy);
}
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
ata_scsi_lpm_show, ata_scsi_lpm_put);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
static ssize_t ata_scsi_park_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap;
struct ata_link *link;
struct ata_device *dev;
unsigned long flags, now;
unsigned int uninitialized_var(msecs);
int rc = 0;
ap = ata_shost_to_port(sdev->host);
spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev) {
rc = -ENODEV;
goto unlock;
}
if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
rc = -EOPNOTSUPP;
goto unlock;
}
link = dev->link;
now = jiffies;
if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
link->eh_context.unloaded_mask & (1 << dev->devno) &&
time_after(dev->unpark_deadline, now))
msecs = jiffies_to_msecs(dev->unpark_deadline - now);
else
msecs = 0;
unlock:
	spin_unlock_irqrestore(ap->lock, flags);
return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
}
static ssize_t ata_scsi_park_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap;
struct ata_device *dev;
long int input;
unsigned long flags;
int rc;
rc = strict_strtol(buf, 10, &input);
if (rc || input < -2)
return -EINVAL;
if (input > ATA_TMOUT_MAX_PARK) {
rc = -EOVERFLOW;
input = ATA_TMOUT_MAX_PARK;
}
ap = ata_shost_to_port(sdev->host);
spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (unlikely(!dev)) {
rc = -ENODEV;
goto unlock;
}
if (dev->class != ATA_DEV_ATA) {
rc = -EOPNOTSUPP;
goto unlock;
}
if (input >= 0) {
if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
rc = -EOPNOTSUPP;
goto unlock;
}
dev->unpark_deadline = ata_deadline(jiffies, input);
dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
ata_port_schedule_eh(ap);
complete(&ap->park_req_pending);
} else {
switch (input) {
case -1:
dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
break;
case -2:
dev->flags |= ATA_DFLAG_NO_UNLOAD;
break;
}
}
unlock:
spin_unlock_irqrestore(ap->lock, flags);
return rc ? rc : len;
}
DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
return ap->ops->em_store(ap, buf, count);
return -EINVAL;
}
static ssize_t
ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
return ap->ops->em_show(ap, buf);
return -EINVAL;
}
DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
ata_scsi_em_message_show, ata_scsi_em_message_store);
EXPORT_SYMBOL_GPL(dev_attr_em_message);
static ssize_t
ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
return snprintf(buf, 23, "%d\n", ap->em_message_type);
}
DEVICE_ATTR(em_message_type, S_IRUGO,
ata_scsi_em_message_type_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
static ssize_t
ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
return ap->ops->sw_activity_show(atadev, buf);
return -EINVAL;
}
static ssize_t
ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
enum sw_activity val;
int rc;
if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
val = simple_strtoul(buf, NULL, 0);
switch (val) {
case OFF: case BLINK_ON: case BLINK_OFF:
rc = ap->ops->sw_activity_store(atadev, val);
if (!rc)
return count;
else
return rc;
}
}
return -EINVAL;
}
DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
ata_scsi_activity_store);
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
struct device_attribute *ata_common_sdev_attrs[] = {
&dev_attr_unload_heads,
NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
/* "Invalid field in cbd" */
done(cmd);
}
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
* @bdev: block device associated with @sdev
* @capacity: capacity of SCSI device
* @geom: location to which geometry will be output
*
* Generic bios head/sector/cylinder calculator
 * used by sd.  Most BIOSes nowadays expect a XXX/255/63 (CHS)
* mapping. Some situations may arise where the disk is not
* bootable if this is not used.
*
* LOCKING:
* Defined by the SCSI layer. We don't really care.
*
* RETURNS:
* Zero.
*/
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
geom[0] = 255;
geom[1] = 63;
sector_div(capacity, 255*63);
geom[2] = capacity;
return 0;
}
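/*
 * Worked example (illustrative only, guarded out): the 255-head/63-sector
 * mapping above applied to a 20 GiB disk.  Plain 64-bit division stands in
 * for sector_div().
 */
#if 0
static void bios_param_example(void)
{
	unsigned long long capacity = 41943040ULL;	/* 512-byte sectors */
	int heads = 255, sectors = 63;
	unsigned long long cylinders = capacity / (heads * sectors);

	/* 41943040 / 16065 = 2610 cylinders (remainder discarded) */
	(void)cylinders;
}
#endif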
/**
* ata_scsi_unlock_native_capacity - unlock native capacity
* @sdev: SCSI device to adjust device capacity for
*
* This function is called if a partition on @sdev extends beyond
* the end of the device. It requests EH to unlock HPA.
*
* LOCKING:
* Defined by the SCSI layer. Might sleep.
*/
void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (dev && dev->n_sectors < dev->n_native_sectors) {
dev->flags |= ATA_DFLAG_UNLOCK_HPA;
dev->link->eh_info.action |= ATA_EH_RESET;
ata_port_schedule_eh(ap);
}
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
}
void ata_scsi_reconfig(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
return;
ata_dev_read_id(dev, &dev->class, 0, dev->id);
dev->pio_mask = UINT_MAX;
dev->mwdma_mask = UINT_MAX;
dev->udma_mask = UINT_MAX;
ata_dev_configure(dev);
ata_set_mode(&ap->link, &dev);
}
EXPORT_SYMBOL(ata_scsi_reconfig);
/**
* ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
* @ap: target port
* @sdev: SCSI device to get identify data for
* @arg: User buffer area for identify data
*
* LOCKING:
* Defined by the SCSI layer. We don't really care.
*
* RETURNS:
* Zero on success, negative errno on error.
*/
static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
void __user *arg)
{
struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
u16 __user *dst = arg;
char buf[40];
if (!dev)
return -ENOMSG;
if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
return -EFAULT;
ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
return -EFAULT;
ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
return -EFAULT;
ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
return -EFAULT;
return 0;
}
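/*
 * Userspace view of the ioctl served above (illustrative sketch, guarded
 * out): the device path is a placeholder and error handling is minimal.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

static void identity_example(void)
{
	struct hd_driveid id;
	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);

	if (fd >= 0 && ioctl(fd, HDIO_GET_IDENTITY, &id) == 0)
		printf("model: %.40s fw: %.8s serial: %.20s\n",
		       id.model, id.fw_rev, id.serial_no);
	if (fd >= 0)
		close(fd);
}
#endif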
/**
* ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
* @scsidev: Device to which we are issuing command
* @arg: User provided data for issuing command
*
* LOCKING:
* Defined by the SCSI layer. We don't really care.
*
* RETURNS:
* Zero on success, negative errno on error.
*/
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
u8 scsi_cmd[MAX_COMMAND_SIZE];
u8 args[4], *argbuf = NULL, *sensebuf = NULL;
int argsize = 0;
enum dma_data_direction data_dir;
int cmd_result;
if (arg == NULL)
return -EINVAL;
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
if (!sensebuf)
return -ENOMEM;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
if (args[3]) {
argsize = SECTOR_SIZE * args[3];
argbuf = kmalloc(argsize, GFP_KERNEL);
if (argbuf == NULL) {
rc = -ENOMEM;
goto error;
}
scsi_cmd[1] = (4 << 1); /* PIO Data-in */
scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
block count in sector count field */
data_dir = DMA_FROM_DEVICE;
} else {
scsi_cmd[1] = (3 << 1); /* Non-data */
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
data_dir = DMA_NONE;
}
scsi_cmd[0] = ATA_16;
scsi_cmd[4] = args[2];
if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */
scsi_cmd[6] = args[3];
scsi_cmd[8] = args[1];
scsi_cmd[10] = 0x4f;
scsi_cmd[12] = 0xc2;
} else {
scsi_cmd[6] = args[1];
}
scsi_cmd[14] = args[0];
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
sensebuf, (10*HZ), 5, 0, NULL);
if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
u8 *desc = sensebuf + 8;
cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
if (cmd_result & SAM_STAT_CHECK_CONDITION) {
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
if (sshdr.sense_key == 0 &&
sshdr.asc == 0 && sshdr.ascq == 0)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
/* Send userspace a few ATA registers (same as drivers/ide) */
if (sensebuf[0] == 0x72 && /* format is "descriptor" */
desc[0] == 0x09) { /* code is "ATA Descriptor" */
args[0] = desc[13]; /* status */
args[1] = desc[3]; /* error */
args[2] = desc[5]; /* sector count (0:7) */
if (copy_to_user(arg, args, sizeof(args)))
rc = -EFAULT;
}
}
if (cmd_result) {
rc = -EIO;
goto error;
}
if ((argbuf)
&& copy_to_user(arg + sizeof(args), argbuf, argsize))
rc = -EFAULT;
error:
kfree(sensebuf);
kfree(argbuf);
return rc;
}
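/*
 * Illustrative sketch (guarded out) of the SAT ATA PASS-THROUGH(16) CDB
 * that the PIO data-in branch above builds, here filled in for IDENTIFY
 * DEVICE reading one 512-byte block.  It mirrors the field positions used
 * above and is not an additional kernel interface.
 */
#if 0
static void build_identify_cdb_example(u8 cdb[16])
{
	memset(cdb, 0, 16);
	cdb[0]  = ATA_16;		/* 0x85, ATA PASS-THROUGH (16) */
	cdb[1]  = (4 << 1);		/* protocol 4: PIO Data-in */
	cdb[2]  = 0x0e;			/* T_DIR=from device, BYT_BLOK=blocks,
					 * T_LENGTH=in sector count field */
	cdb[6]  = 1;			/* sector count (7:0): one block */
	cdb[14] = ATA_CMD_ID_ATA;	/* 0xec, IDENTIFY DEVICE */
}
#endif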
/**
* ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
* @scsidev: Device to which we are issuing command
* @arg: User provided data for issuing command
*
* LOCKING:
* Defined by the SCSI layer. We don't really care.
*
* RETURNS:
* Zero on success, negative errno on error.
*/
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
u8 scsi_cmd[MAX_COMMAND_SIZE];
u8 args[7], *sensebuf = NULL;
int cmd_result;
if (arg == NULL)
return -EINVAL;
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
if (!sensebuf)
return -ENOMEM;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
scsi_cmd[1] = (3 << 1); /* Non-data */
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
scsi_cmd[4] = args[1];
scsi_cmd[6] = args[2];
scsi_cmd[8] = args[3];
scsi_cmd[10] = args[4];
scsi_cmd[12] = args[5];
scsi_cmd[13] = args[6] & 0x4f;
scsi_cmd[14] = args[0];
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
sensebuf, (10*HZ), 5, 0, NULL);
if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
u8 *desc = sensebuf + 8;
cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
if (cmd_result & SAM_STAT_CHECK_CONDITION) {
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
if (sshdr.sense_key == 0 &&
sshdr.asc == 0 && sshdr.ascq == 0)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
/* Send userspace ATA registers */
if (sensebuf[0] == 0x72 && /* format is "descriptor" */
desc[0] == 0x09) {/* code is "ATA Descriptor" */
args[0] = desc[13]; /* status */
args[1] = desc[3]; /* error */
args[2] = desc[5]; /* sector count (0:7) */
args[3] = desc[7]; /* lbal */
args[4] = desc[9]; /* lbam */
args[5] = desc[11]; /* lbah */
args[6] = desc[12]; /* select */
if (copy_to_user(arg, args, sizeof(args)))
rc = -EFAULT;
}
}
if (cmd_result) {
rc = -EIO;
goto error;
}
error:
kfree(sensebuf);
return rc;
}
static int ata_ioc32(struct ata_port *ap)
{
if (ap->flags & ATA_FLAG_PIO_DMA)
return 1;
if (ap->pflags & ATA_PFLAG_PIO32)
return 1;
return 0;
}
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
int cmd, void __user *arg)
{
int val = -EINVAL, rc = -EINVAL;
unsigned long flags;
switch (cmd) {
case ATA_IOC_GET_IO32:
spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags);
if (copy_to_user(arg, &val, 1))
return -EFAULT;
return 0;
case ATA_IOC_SET_IO32:
val = (unsigned long) arg;
rc = 0;
spin_lock_irqsave(ap->lock, flags);
if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
if (val)
ap->pflags |= ATA_PFLAG_PIO32;
else
ap->pflags &= ~ATA_PFLAG_PIO32;
} else {
if (val != ata_ioc32(ap))
rc = -EINVAL;
}
spin_unlock_irqrestore(ap->lock, flags);
return rc;
case HDIO_GET_IDENTITY:
return ata_get_identity(ap, scsidev, arg);
case HDIO_DRIVE_CMD:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
return ata_cmd_ioctl(scsidev, arg);
case HDIO_DRIVE_TASK:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
return ata_task_ioctl(scsidev, arg);
default:
rc = -ENOTTY;
break;
}
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
{
return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
scsidev, cmd, arg);
}
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
/**
* ata_scsi_qc_new - acquire new ata_queued_cmd reference
* @dev: ATA device to which the new command is attached
* @cmd: SCSI command that originated this ATA command
* @done: SCSI command completion function
*
* Obtain a reference to an unused ata_queued_cmd structure,
* which is the basic libata structure representing a single
* ATA command sent to the hardware.
*
* If a command was available, fill in the SCSI-specific
* portions of the structure with information on the
* current command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Command allocated, or %NULL if none available.
*/
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct ata_queued_cmd *qc;
qc = ata_qc_new_init(dev);
if (qc) {
qc->scsicmd = cmd;
qc->scsidone = done;
qc->sg = scsi_sglist(cmd);
qc->n_elem = scsi_sg_count(cmd);
} else {
cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
done(cmd);
}
return qc;
}
static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
qc->extrabytes = scmd->request->extra_len;
qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
}
/**
* ata_dump_status - user friendly display of error info
* @id: id of the port in question
* @tf: ptr to filled out taskfile
*
* Decode and dump the ATA error/status registers for the user so
* that they have some idea what really happened at the non
* make-believe layer.
*
* LOCKING:
* inherited from caller
*/
static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
{
u8 stat = tf->command, err = tf->feature;
printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
if (stat & ATA_BUSY) {
printk("Busy }\n"); /* Data is not valid in this case */
} else {
if (stat & 0x40) printk("DriveReady ");
if (stat & 0x20) printk("DeviceFault ");
if (stat & 0x10) printk("SeekComplete ");
if (stat & 0x08) printk("DataRequest ");
if (stat & 0x04) printk("CorrectedError ");
if (stat & 0x02) printk("Index ");
if (stat & 0x01) printk("Error ");
printk("}\n");
if (err) {
printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
if (err & 0x04) printk("DriveStatusError ");
if (err & 0x80) {
if (err & 0x04) printk("BadCRC ");
else printk("Sector ");
}
if (err & 0x40) printk("UncorrectableError ");
if (err & 0x10) printk("SectorIdNotFound ");
if (err & 0x02) printk("TrackZeroNotFound ");
if (err & 0x01) printk("AddrMarkNotFound ");
printk("}\n");
}
}
}
/**
* ata_to_sense_error - convert ATA error to SCSI error
* @id: ATA device number
* @drv_stat: value contained in ATA status register
* @drv_err: value contained in ATA error register
* @sk: the sense key we'll fill out
* @asc: the additional sense code we'll fill out
* @ascq: the additional sense code qualifier we'll fill out
* @verbose: be verbose
*
* Converts an ATA error into a SCSI error. Fill out pointers to
* SK, ASC, and ASCQ bytes for later use in fixed or descriptor
* format sense blocks.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
u8 *asc, u8 *ascq, int verbose)
{
int i;
/* Based on the 3ware driver translation table */
static const unsigned char sense_table[][4] = {
/* BBD|ECC|ID|MAR */
{0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
/* BBD|ECC|ID */
{0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
/* ECC|MC|MARK */
{0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error
/* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
{0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error
/* MC|ID|ABRT|TRK0|MARK */
{0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready
/* MCR|MARK */
{0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready
/* Bad address mark */
{0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field
/* TRK0 */
{0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error
/* Abort & !ICRC */
{0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command
/* Media change request */
{0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline
/* SRV */
{0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found
/* Media change */
{0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline
/* ECC */
{0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error
/* BBD - block marked bad */
{0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
static const unsigned char stat_table[][4] = {
/* Must be first because BUSY means no other bits valid */
{0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now
{0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault
{0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now
{0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
/*
* Is this an error we can process/parse
*/
if (drv_stat & ATA_BUSY) {
drv_err = 0; /* Ignore the err bits, they're invalid */
}
if (drv_err) {
/* Look for drv_err */
for (i = 0; sense_table[i][0] != 0xFF; i++) {
/* Look for best matches first */
if ((sense_table[i][0] & drv_err) ==
sense_table[i][0]) {
*sk = sense_table[i][1];
*asc = sense_table[i][2];
*ascq = sense_table[i][3];
goto translate_done;
}
}
/* No immediate match */
if (verbose)
printk(KERN_WARNING "ata%u: no sense translation for "
"error 0x%02x\n", id, drv_err);
}
/* Fall back to interpreting status bits */
for (i = 0; stat_table[i][0] != 0xFF; i++) {
if (stat_table[i][0] & drv_stat) {
*sk = stat_table[i][1];
*asc = stat_table[i][2];
*ascq = stat_table[i][3];
goto translate_done;
}
}
/* No error? Undecoded? */
if (verbose)
printk(KERN_WARNING "ata%u: no sense translation for "
"status: 0x%02x\n", id, drv_stat);
/* We need a sensible error return here, which is tricky, and one
that won't cause people to do things like return a disk wrongly */
*sk = ABORTED_COMMAND;
*asc = 0x00;
*ascq = 0x00;
translate_done:
if (verbose)
printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
"to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
id, drv_stat, drv_err, *sk, *asc, *ascq);
return;
}
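/*
 * Minimal standalone sketch (guarded out) of the "first matching mask
 * wins" scan used by ata_to_sense_error() above.  The two-entry table is
 * a made-up subset for illustration only.
 */
#if 0
static void sense_lookup_example(void)
{
	static const unsigned char tbl[][4] = {
		{0x40, MEDIUM_ERROR,    0x11, 0x04},	/* ECC error */
		{0x04, ABORTED_COMMAND, 0x00, 0x00},	/* ABRT */
		{0xFF, 0xFF, 0xFF, 0xFF},		/* END mark */
	};
	unsigned char drv_err = 0x40, sk = 0, asc = 0, ascq = 0;
	int i;

	for (i = 0; tbl[i][0] != 0xFF; i++) {
		if ((tbl[i][0] & drv_err) == tbl[i][0]) {
			sk = tbl[i][1]; asc = tbl[i][2]; ascq = tbl[i][3];
			break;	/* 0x40 -> MEDIUM_ERROR / 0x11 / 0x04 */
		}
	}
	(void)sk; (void)asc; (void)ascq;
}
#endif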
/*
* ata_gen_passthru_sense - Generate check condition sense block.
* @qc: Command that completed.
*
* This function is specific to the ATA descriptor format sense
* block specified for the ATA pass through commands. Regardless
* of whether the command errored or not, return a sense
* block. Copy all controller registers into the sense
* block. Clear sense key, ASC & ASCQ if there is no error.
*
* LOCKING:
* None.
*/
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
unsigned char *desc = sb + 8;
int verbose = qc->ap->ops->error_handler == NULL;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
/*
* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
&sb[1], &sb[2], &sb[3], verbose);
sb[1] &= 0x0f;
}
/*
* Sense data is current and format is descriptor.
*/
sb[0] = 0x72;
desc[0] = 0x09;
/* set length of additional sense data */
sb[7] = 14;
desc[1] = 12;
/*
* Copy registers into sense buffer.
*/
desc[2] = 0x00;
desc[3] = tf->feature; /* == error reg */
desc[5] = tf->nsect;
desc[7] = tf->lbal;
desc[9] = tf->lbam;
desc[11] = tf->lbah;
desc[12] = tf->device;
desc[13] = tf->command; /* == status reg */
/*
* Fill in Extend bit, and the high order bytes
* if applicable.
*/
if (tf->flags & ATA_TFLAG_LBA48) {
desc[2] |= 0x01;
desc[4] = tf->hob_nsect;
desc[6] = tf->hob_lbal;
desc[8] = tf->hob_lbam;
desc[10] = tf->hob_lbah;
}
}
/**
* ata_gen_ata_sense - generate a SCSI fixed sense block
* @qc: Command that we are erroring out
*
* Generate sense block for a failed ATA command @qc. Descriptor
 * format is used to accommodate LBA48 block address.
*
* LOCKING:
* None.
*/
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
unsigned char *desc = sb + 8;
int verbose = qc->ap->ops->error_handler == NULL;
u64 block;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
/* sense data is current and format is descriptor */
sb[0] = 0x72;
/* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
&sb[1], &sb[2], &sb[3], verbose);
sb[1] &= 0x0f;
}
block = ata_tf_read_block(&qc->result_tf, dev);
/* information sense data descriptor */
sb[7] = 12;
desc[0] = 0x00;
desc[1] = 10;
desc[2] |= 0x80; /* valid */
desc[6] = block >> 40;
desc[7] = block >> 32;
desc[8] = block >> 24;
desc[9] = block >> 16;
desc[10] = block >> 8;
desc[11] = block;
}
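/*
 * Byte-order sketch (illustrative only, guarded out) of how the failed
 * LBA is placed in the information sense-data descriptor above:
 * big-endian, with room for a full 48-bit address.  The LBA value is
 * made up.
 */
#if 0
static void lba_descriptor_example(void)
{
	u64 block = 0x0000A1B2C3D4E5F6ULL;	/* made-up 48-bit LBA */
	u8 desc[12] = { 0x00, 10, 0x80 };	/* type, add'l len, VALID */

	desc[6]  = block >> 40;	/* 0xa1 */
	desc[7]  = block >> 32;	/* 0xb2 */
	desc[8]  = block >> 24;	/* 0xc3 */
	desc[9]  = block >> 16;	/* 0xd4 */
	desc[10] = block >> 8;	/* 0xe5 */
	desc[11] = block;	/* 0xf6 */
	(void)desc;
}
#endif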
static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
* prevent SCSI midlayer from automatically deferring
* requests.
*/
sdev->max_device_blocked = 1;
}
/**
* atapi_drain_needed - Check whether data transfer may overflow
* @rq: request to be checked
*
* ATAPI commands which transfer variable length data to host
 * might overflow due to application error or hardware bug.  This
 * function checks whether overflow should be drained and ignored
 * for @rq.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if the request may need draining; otherwise, 0.
*/
static int atapi_drain_needed(struct request *rq)
{
if (likely(!blk_pc_request(rq)))
return 0;
if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
}
static int ata_scsi_dev_config(struct scsi_device *sdev,
struct ata_device *dev)
{
if (!ata_id_has_unload(dev->id))
dev->flags |= ATA_DFLAG_NO_UNLOAD;
/* configure max sectors */
blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
sdev->sector_size = ATA_SECT_SIZE;
if (dev->class == ATA_DEV_ATAPI) {
struct request_queue *q = sdev->request_queue;
void *buf;
/* set DMA padding */
blk_queue_update_dma_pad(sdev->request_queue,
ATA_DMA_PAD_SZ - 1);
/* configure draining */
buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
if (!buf) {
ata_dev_printk(dev, KERN_ERR,
"drain buffer allocation failed\n");
return -ENOMEM;
}
blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
sdev->manage_start_stop = 1;
}
/*
* ata_pio_sectors() expects buffer for each sector to not cross
* page boundary. Enforce it by requiring buffers to be sector
* aligned, which works iff sector_size is not larger than
* PAGE_SIZE. ATAPI devices also need the alignment as
* IDENTIFY_PACKET is executed as ATA_PROT_PIO.
*/
if (sdev->sector_size > PAGE_SIZE)
ata_dev_printk(dev, KERN_WARNING,
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);
blk_queue_update_dma_alignment(sdev->request_queue,
sdev->sector_size - 1);
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
if (dev->flags & ATA_DFLAG_NCQ) {
int depth;
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
depth = min(ATA_MAX_QUEUE - 1, depth);
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
}
return 0;
}
/**
* ata_scsi_slave_config - Set SCSI device attributes
* @sdev: SCSI device to examine
*
* This is called before we actually start reading
* and writing to the device, to configure certain
* SCSI mid-layer behaviors.
*
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
int ata_scsi_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
int rc = 0;
ata_scsi_sdev_config(sdev);
if (dev)
rc = ata_scsi_dev_config(sdev, dev);
return rc;
}
/**
* ata_scsi_slave_destroy - SCSI device is about to be destroyed
* @sdev: SCSI device to be destroyed
*
* @sdev is about to be destroyed for hot/warm unplugging. If
* this unplugging was initiated by libata as indicated by NULL
* dev->sdev, this function doesn't have to do anything.
* Otherwise, SCSI layer initiated warm-unplug is in progress.
* Clear dev->sdev, schedule the device for ATA detach and invoke
* EH.
*
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct request_queue *q = sdev->request_queue;
unsigned long flags;
struct ata_device *dev;
if (!ap->ops->error_handler)
return;
spin_lock_irqsave(ap->lock, flags);
dev = __ata_scsi_find_dev(ap, sdev);
if (dev && dev->sdev) {
/* SCSI device already in CANCEL state, no need to offline it */
dev->sdev = NULL;
dev->flags |= ATA_DFLAG_DETACH;
ata_port_schedule_eh(ap);
}
spin_unlock_irqrestore(ap->lock, flags);
kfree(q->dma_drain_buffer);
q->dma_drain_buffer = NULL;
q->dma_drain_size = 0;
}
/**
* ata_scsi_change_queue_depth - SCSI callback for queue depth config
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
* @reason: calling context
*
* This is libata standard hostt->change_queue_depth callback.
* SCSI will call into this callback when user tries to set queue
* depth via sysfs.
*
* LOCKING:
* SCSI layer (we don't care)
*
* RETURNS:
* Newly configured queue depth.
*/
int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
int reason)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
unsigned long flags;
if (reason != SCSI_QDEPTH_DEFAULT)
return -EOPNOTSUPP;
if (queue_depth < 1 || queue_depth == sdev->queue_depth)
return sdev->queue_depth;
dev = ata_scsi_find_dev(ap, sdev);
if (!dev || !ata_dev_enabled(dev))
return sdev->queue_depth;
/* NCQ enabled? */
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_NCQ_OFF;
if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
dev->flags |= ATA_DFLAG_NCQ_OFF;
queue_depth = 1;
}
spin_unlock_irqrestore(ap->lock, flags);
/* limit and apply queue depth */
queue_depth = min(queue_depth, sdev->host->can_queue);
queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);
if (sdev->queue_depth == queue_depth)
return -EINVAL;
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
return queue_depth;
}
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
* @qc: Storage for translated ATA taskfile
*
* Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
* (to start). Perhaps these commands should be preceded by
* CHECK POWER MODE to see what power mode the device is already in.
* [See SAT revision 5 at www.t10.org]
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->tf;
const u8 *cdb = scmd->cmnd;
if (scmd->cmd_len < 5)
goto invalid_fld;
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf->protocol = ATA_PROT_NODATA;
if (cdb[1] & 0x1) {
; /* ignore IMMED bit, violates sat-r05 */
}
if (cdb[4] & 0x2)
goto invalid_fld; /* LOEJ bit set not supported */
if (((cdb[4] >> 4) & 0xf) != 0)
goto invalid_fld; /* power conditions not supported */
if (cdb[4] & 0x1) {
tf->nsect = 1; /* 1 sector, lba=0 */
if (qc->dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
tf->lbah = 0x0;
tf->lbam = 0x0;
tf->lbal = 0x0;
tf->device |= ATA_LBA;
} else {
/* CHS */
tf->lbal = 0x1; /* sect */
tf->lbam = 0x0; /* cyl low */
tf->lbah = 0x0; /* cyl high */
}
tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
} else {
/* Some odd clown BIOSen issue spindown on power off (ACPI S4
* or S5) causing some drives to spin up and down again.
*/
if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
system_state == SYSTEM_POWER_OFF)
goto skip;
if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
system_entering_hibernation())
goto skip;
/* Issue ATA STANDBY IMMEDIATE command */
tf->command = ATA_CMD_STANDBYNOW1;
}
/*
* Standby and Idle condition timers could be implemented but that
* would require libata to implement the Power condition mode page
* and allow the user to change it. Changing mode pages requires
* MODE SELECT to be implemented.
*/
return 0;
invalid_fld:
ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
/* "Invalid field in cbd" */
return 1;
skip:
scmd->result = SAM_STAT_GOOD;
return 1;
}
/**
* ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
* @qc: Storage for translated ATA taskfile
*
* Sets up an ATA taskfile to issue FLUSH CACHE or
* FLUSH CACHE EXT.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
tf->flags |= ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
tf->command = ATA_CMD_FLUSH_EXT;
else
tf->command = ATA_CMD_FLUSH;
/* flush is critical for IO integrity, consider it an IO command */
qc->flags |= ATA_QCFLAG_IO;
return 0;
}
/**
* scsi_6_lba_len - Get LBA and transfer length
* @cdb: SCSI command to translate
*
* Calculate LBA and transfer length for 6-byte commands.
*
* RETURNS:
* @plba: the LBA
* @plen: the transfer length
*/
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
u64 lba = 0;
u32 len;
VPRINTK("six-byte command\n");
lba |= ((u64)(cdb[1] & 0x1f)) << 16;
lba |= ((u64)cdb[2]) << 8;
lba |= ((u64)cdb[3]);
len = cdb[4];
*plba = lba;
*plen = len;
}
/**
* scsi_10_lba_len - Get LBA and transfer length
* @cdb: SCSI command to translate
*
* Calculate LBA and transfer length for 10-byte commands.
*
* RETURNS:
* @plba: the LBA
* @plen: the transfer length
*/
static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
u64 lba = 0;
u32 len = 0;
VPRINTK("ten-byte command\n");
lba |= ((u64)cdb[2]) << 24;
lba |= ((u64)cdb[3]) << 16;
lba |= ((u64)cdb[4]) << 8;
lba |= ((u64)cdb[5]);
len |= ((u32)cdb[7]) << 8;
len |= ((u32)cdb[8]);
*plba = lba;
*plen = len;
}
/**
* scsi_16_lba_len - Get LBA and transfer length
* @cdb: SCSI command to translate
*
* Calculate LBA and transfer length for 16-byte commands.
*
* RETURNS:
* @plba: the LBA
* @plen: the transfer length
*/
static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
u64 lba = 0;
u32 len = 0;
VPRINTK("sixteen-byte command\n");
lba |= ((u64)cdb[2]) << 56;
lba |= ((u64)cdb[3]) << 48;
lba |= ((u64)cdb[4]) << 40;
lba |= ((u64)cdb[5]) << 32;
lba |= ((u64)cdb[6]) << 24;
lba |= ((u64)cdb[7]) << 16;
lba |= ((u64)cdb[8]) << 8;
lba |= ((u64)cdb[9]);
len |= ((u32)cdb[10]) << 24;
len |= ((u32)cdb[11]) << 16;
len |= ((u32)cdb[12]) << 8;
len |= ((u32)cdb[13]);
*plba = lba;
*plen = len;
}
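/*
 * Standalone decode example (illustrative, guarded out): a READ(10) CDB
 * for LBA 0x12345678 and 16 blocks, run through the same extraction as
 * scsi_10_lba_len() above.
 */
#if 0
static void cdb10_decode_example(void)
{
	const u8 cdb[10] = { READ_10, 0,
			     0x12, 0x34, 0x56, 0x78,	/* LBA, big-endian */
			     0,
			     0x00, 0x10,		/* transfer length */
			     0 };
	u64 lba;
	u32 len;

	scsi_10_lba_len(cdb, &lba, &len);
	/* lba == 0x12345678, len == 16 */
	(void)lba; (void)len;
}
#endif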
/**
* ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
* @qc: Storage for translated ATA taskfile
*
* Converts SCSI VERIFY command to an ATA READ VERIFY command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
u64 dev_sectors = qc->dev->n_sectors;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
if (cdb[0] == VERIFY) {
if (scmd->cmd_len < 10)
goto invalid_fld;
scsi_10_lba_len(cdb, &block, &n_block);
} else if (cdb[0] == VERIFY_16) {
if (scmd->cmd_len < 16)
goto invalid_fld;
scsi_16_lba_len(cdb, &block, &n_block);
} else
goto invalid_fld;
if (!n_block)
goto nothing_to_do;
if (block >= dev_sectors)
goto out_of_range;
if ((block + n_block) > dev_sectors)
goto out_of_range;
if (dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
if (lba_28_ok(block, n_block)) {
/* use LBA28 */
tf->command = ATA_CMD_VERIFY;
tf->device |= (block >> 24) & 0xf;
} else if (lba_48_ok(block, n_block)) {
if (!(dev->flags & ATA_DFLAG_LBA48))
goto out_of_range;
/* use LBA48 */
tf->flags |= ATA_TFLAG_LBA48;
tf->command = ATA_CMD_VERIFY_EXT;
tf->hob_nsect = (n_block >> 8) & 0xff;
tf->hob_lbah = (block >> 40) & 0xff;
tf->hob_lbam = (block >> 32) & 0xff;
tf->hob_lbal = (block >> 24) & 0xff;
} else
/* request too large even for LBA48 */
goto out_of_range;
tf->nsect = n_block & 0xff;
tf->lbah = (block >> 16) & 0xff;
tf->lbam = (block >> 8) & 0xff;
tf->lbal = block & 0xff;
tf->device |= ATA_LBA;
} else {
/* CHS */
u32 sect, head, cyl, track;
if (!lba_28_ok(block, n_block))
goto out_of_range;
/* Convert LBA to CHS */
track = (u32)block / dev->sectors;
cyl = track / dev->heads;
head = track % dev->heads;
sect = (u32)block % dev->sectors + 1;
DPRINTK("block %u track %u cyl %u head %u sect %u\n",
(u32)block, track, cyl, head, sect);
/* Check whether the converted CHS can fit.
Cylinder: 0-65535
Head: 0-15
Sector: 1-255*/
if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
goto out_of_range;
tf->command = ATA_CMD_VERIFY;
tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
tf->lbal = sect;
tf->lbam = cyl;
tf->lbah = cyl >> 8;
tf->device |= head;
}
return 0;
invalid_fld:
ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
/* "Invalid field in cbd" */
return 1;
out_of_range:
ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
/* "Logical Block Address out of range" */
return 1;
nothing_to_do:
scmd->result = SAM_STAT_GOOD;
return 1;
}
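/*
 * Standalone sketch (guarded out) of the LBA-to-CHS conversion used in
 * the CHS branch above, with made-up geometry: 16 heads, 63 sectors per
 * track.
 */
#if 0
static void lba_to_chs_example(void)
{
	u32 block = 100000, heads = 16, sectors = 63;
	u32 track = block / sectors;		/* 1587 */
	u32 cyl   = track / heads;		/* 99   */
	u32 head  = track % heads;		/* 3    */
	u32 sect  = block % sectors + 1;	/* 20   */

	(void)cyl; (void)head; (void)sect;
}
#endif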
/**
* ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
* @qc: Storage for translated ATA taskfile
*
* Converts any of six SCSI read/write commands into the
* ATA counterpart, including starting sector (LBA),
* sector count, and taking into account the device's LBA48
* support.
*
* Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
* %WRITE_16 are currently supported.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
*/
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
unsigned int tf_flags = 0;
u64 block;
u32 n_block;
int rc;
if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
tf_flags |= ATA_TFLAG_WRITE;
/* Calculate the SCSI LBA, transfer length and FUA. */
switch (cdb[0]) {
case READ_10:
case WRITE_10:
if (unlikely(scmd->cmd_len < 10))
goto invalid_fld;
scsi_10_lba_len(cdb, &block, &n_block);
if (unlikely(cdb[1] & (1 << 3)))
tf_flags |= ATA_TFLAG_FUA;
break;
case READ_6:
case WRITE_6:
if (unlikely(scmd->cmd_len < 6))
goto invalid_fld;
scsi_6_lba_len(cdb, &block, &n_block);
/* for 6-byte r/w commands, transfer length 0
* means 256 blocks of data, not 0 block.
*/
if (!n_block)
n_block = 256;
break;
case READ_16:
case WRITE_16:
if (unlikely(scmd->cmd_len < 16))
goto invalid_fld;
scsi_16_lba_len(cdb, &block, &n_block);
if (unlikely(cdb[1] & (1 << 3)))
tf_flags |= ATA_TFLAG_FUA;
break;
default:
DPRINTK("no-byte command\n");
goto invalid_fld;
}
/* Check and compose ATA command */
if (!n_block)
/* For 10-byte and 16-byte SCSI R/W commands, transfer
* length 0 means transfer 0 block of data.
* However, for ATA R/W commands, sector count 0 means
* 256 or 65536 sectors, not 0 sectors as in SCSI.
*
* WARNING: one or two older ATA drives treat 0 as 0...
*/
goto nothing_to_do;
qc->flags |= ATA_QCFLAG_IO;
qc->nbytes = n_block * ATA_SECT_SIZE;
rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
qc->tag);
if (likely(rc == 0))
return 0;
if (rc == -ERANGE)
goto out_of_range;
/* treat all other errors as -EINVAL, fall through */
invalid_fld:
ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
/* "Invalid field in cbd" */
return 1;
out_of_range:
ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
/* "Logical Block Address out of range" */
return 1;
nothing_to_do:
scmd->result = SAM_STAT_GOOD;
return 1;
}
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
int need_sense = (qc->err_mask != 0);
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
* generate because the user forced us to, a check condition
* is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
* was no error, SK, ASC and ASCQ will all be zero.
*/
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use
* for 48b LBA devices and call that here
* instead of the fixed desc, which is only
* good for smaller LBA (and maybe CHS?)
* devices.
*/
ata_gen_ata_sense(qc);
}
}
if (need_sense && !ap->ops->error_handler)
ata_dump_status(ap->print_id, &qc->result_tf);
qc->scsidone(cmd);
ata_qc_free(qc);
}
/**
* ata_scsi_translate - Translate then issue SCSI command to ATA device
* @dev: ATA device to which the command is addressed
* @cmd: SCSI command to execute
* @done: SCSI command completion function
* @xlat_func: Actor which translates @cmd to an ATA taskfile
*
* Our ->queuecommand() function has decided that the SCSI
* command issued can be directly translated into an ATA
* command, rather than handled internally.
*
* This function sets up an ata_queued_cmd structure for the
* SCSI command, and sends that ata_queued_cmd to the hardware.
*
* The xlat_func argument (actor) returns 0 if ready to execute
* ATA command, else 1 to finish translation. If 1 is returned
* then cmd->result (and possibly cmd->sense_buffer) are assumed
* to be set reflecting an error condition or clean (early)
* termination.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
* needs to be deferred.
*/
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *),
ata_xlat_func_t xlat_func)
{
struct ata_port *ap = dev->link->ap;
struct ata_queued_cmd *qc;
int rc;
VPRINTK("ENTER\n");
qc = ata_scsi_qc_new(dev, cmd, done);
if (!qc)
goto err_mem;
/* data is present; dma-map it */
if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
cmd->sc_data_direction == DMA_TO_DEVICE) {
if (unlikely(scsi_bufflen(cmd) < 1)) {
ata_dev_printk(dev, KERN_WARNING,
"WARNING: zero len r/w req\n");
goto err_did;
}
ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
qc->dma_dir = cmd->sc_data_direction;
}
qc->complete_fn = ata_scsi_qc_complete;
if (xlat_func(qc))
goto early_finish;
if (ap->ops->qc_defer) {
if ((rc = ap->ops->qc_defer(qc)))
goto defer;
}
/* select device, send command to hardware */
ata_qc_issue(qc);
VPRINTK("EXIT\n");
return 0;
early_finish:
ata_qc_free(qc);
qc->scsidone(cmd);
DPRINTK("EXIT - early finish (good or error)\n");
return 0;
err_did:
ata_qc_free(qc);
cmd->result = (DID_ERROR << 16);
qc->scsidone(cmd);
err_mem:
DPRINTK("EXIT - internal\n");
return 0;
defer:
ata_qc_free(qc);
DPRINTK("EXIT - defer\n");
if (rc == ATA_DEFER_LINK)
return SCSI_MLQUEUE_DEVICE_BUSY;
else
return SCSI_MLQUEUE_HOST_BUSY;
}
/**
* ata_scsi_rbuf_get - Map response buffer.
* @cmd: SCSI command containing buffer to be mapped.
* @flags: unsigned long variable to store irq enable status
* @copy_in: copy in from user buffer
*
* Prepare buffer for simulated SCSI commands.
*
* LOCKING:
* spin_lock_irqsave(ata_scsi_rbuf_lock) on success
*
* RETURNS:
* Pointer to response buffer.
*/
static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
unsigned long *flags)
{
spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);
memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
if (copy_in)
sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
return ata_scsi_rbuf;
}
/**
* ata_scsi_rbuf_put - Unmap response buffer.
* @cmd: SCSI command containing buffer to be unmapped.
* @copy_out: copy out result
* @flags: @flags passed to ata_scsi_rbuf_get()
*
 * Releases the response buffer.  Its contents are copied to @cmd's
 * buffer if @copy_out is true.
*
* LOCKING:
* Unlocks ata_scsi_rbuf_lock.
*/
static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
unsigned long *flags)
{
if (copy_out)
sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
}
/**
* ata_scsi_rbuf_fill - wrapper for SCSI command simulators
* @args: device IDENTIFY data / SCSI command of interest.
* @actor: Callback hook for desired SCSI command simulator
*
* Takes care of the hard work of simulating a SCSI command...
* Mapping the response buffer, calling the command's handler,
* and handling the handler's return value. This return value
* indicates whether the handler wishes the SCSI command to be
* completed successfully (0), or not (in which case cmd->result
* and sense buffer are assumed to be set).
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
{
u8 *rbuf;
unsigned int rc;
struct scsi_cmnd *cmd = args->cmd;
unsigned long flags;
rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
rc = actor(args, rbuf);
ata_scsi_rbuf_put(cmd, rc == 0, &flags);
if (rc == 0)
cmd->result = SAM_STAT_GOOD;
args->done(cmd);
}
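/*
 * Skeleton of a simulator actor as consumed by ata_scsi_rbuf_fill()
 * above (illustrative only, guarded out; not wired into the dispatch
 * code).  Returning 0 asks the wrapper to copy rbuf back and complete
 * the command with SAM_STAT_GOOD; non-zero means cmd->result and the
 * sense buffer were already set by the actor.
 */
#if 0
static unsigned int ata_scsiop_example(struct ata_scsi_args *args, u8 *rbuf)
{
	/* rbuf arrives zeroed and ATA_SCSI_RBUF_SIZE bytes long;
	 * fill in the simulated response here */
	rbuf[0] = 0x00;
	return 0;
}
#endif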
/**
* ata_scsiop_inq_std - Simulate INQUIRY command
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns standard device identification data associated
* with non-VPD INQUIRY command output.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
const u8 versions[] = {
0x60, /* SAM-3 (no version claimed) */
0x03,
0x20, /* SBC-2 (no version claimed) */
0x02,
0x60 /* SPC-3 (no version claimed) */
};
u8 hdr[] = {
TYPE_DISK,
0,
0x5, /* claim SPC-3 version compatibility */
2,
95 - 4
};
VPRINTK("ENTER\n");
	/* set scsi removable (RMB) bit per ata bit */
if (ata_id_removeable(args->id))
hdr[1] |= (1 << 7);
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
memcpy(rbuf + 59, versions, sizeof(versions));
return 0;
}
/**
* ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns list of inquiry VPD pages available.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
0x83, /* page 0x83, device ident page */
0x89, /* page 0x89, ata info page */
0xb0, /* page 0xb0, block limits page */
0xb1, /* page 0xb1, block device characteristics page */
};
rbuf[3] = sizeof(pages); /* number of supported VPD pages */
memcpy(rbuf + 4, pages, sizeof(pages));
return 0;
}
/**
* ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns ATA device serial number.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
{
const u8 hdr[] = {
0,
0x80, /* this page code */
0,
ATA_ID_SERNO_LEN, /* page len */
};
memcpy(rbuf, hdr, sizeof(hdr));
ata_id_string(args->id, (unsigned char *) &rbuf[4],
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
return 0;
}
/**
* ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields two logical unit device identification designators:
* - vendor specific ASCII containing the ATA serial number
* - SAT defined "t10 vendor id based" containing ASCII vendor
* name ("ATA "), model and serial numbers.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
{
const int sat_model_serial_desc_len = 68;
int num;
rbuf[1] = 0x83; /* this page code */
num = 4;
	/* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
rbuf[num + 0] = 2;
rbuf[num + 3] = ATA_ID_SERNO_LEN;
num += 4;
ata_id_string(args->id, (unsigned char *) rbuf + num,
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
/* SAT defined lu model and serial numbers descriptor */
	/* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
rbuf[num + 0] = 2;
rbuf[num + 1] = 1;
rbuf[num + 3] = sat_model_serial_desc_len;
num += 4;
memcpy(rbuf + num, "ATA ", 8);
num += 8;
ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
ATA_ID_PROD_LEN);
num += ATA_ID_PROD_LEN;
ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
return 0;
}
/**
* ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields SAT-specified ATA VPD page.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_taskfile tf;
memset(&tf, 0, sizeof(tf));
rbuf[1] = 0x89; /* our page code */
rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
rbuf[3] = (0x238 & 0xff);
memcpy(&rbuf[8], "linux ", 8);
memcpy(&rbuf[16], "libata ", 16);
memcpy(&rbuf[32], DRV_VERSION, 4);
ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
/* we don't store the ATA device signature, so we fake it */
tf.command = ATA_DRDY; /* really, this is Status reg */
tf.lbal = 0x1;
tf.nsect = 0x1;
ata_tf_to_fis(&tf, 0, 1, &rbuf[36]); /* TODO: PMP? */
rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */
rbuf[56] = ATA_CMD_ID_ATA;
memcpy(&rbuf[60], &args->id[0], 512);
return 0;
}
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
u32 min_io_sectors;
rbuf[1] = 0xb0;
rbuf[3] = 0x3c; /* required VPD size with unmap support */
/*
* Optimal transfer length granularity.
*
* This is always one physical block, but for disks with a smaller
* logical than physical sector size we need to figure out what the
* latter is.
*/
if (ata_id_has_large_logical_sectors(args->id))
min_io_sectors = ata_id_logical_per_physical_sectors(args->id);
else
min_io_sectors = 1;
put_unaligned_be16(min_io_sectors, &rbuf[6]);
	/*
	 * Optimal unmap granularity.
	 *
	 * The ATA spec doesn't even know about a granularity or alignment
	 * for the TRIM command.  We can leave away most of the unmap related
	 * VPD page entries, but we have to specify a granularity to signal
	 * that we support some form of unmap - in this case via WRITE SAME
	 * with the unmap bit set.
	 */
if (ata_id_has_trim(args->id)) {
put_unaligned_be32(65535 * 512 / 8, &rbuf[20]);
put_unaligned_be32(1, &rbuf[28]);
}
return 0;
}
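/*
 * Arithmetic behind the 65535 * 512 / 8 constant above (illustrative,
 * guarded out): assuming each DSM TRIM range entry is 8 bytes with a
 * 16-bit sector count, one 512-byte TRIM payload block holds 64 entries
 * of at most 65535 sectors each.
 */
#if 0
static void trim_granularity_example(void)
{
	u32 ranges_per_block = 512 / 8;			/* 64 */
	u32 max_sectors = 65535 * ranges_per_block;	/* 4194240 */

	(void)max_sectors;
}
#endif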
static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
{
int form_factor = ata_id_form_factor(args->id);
int media_rotation_rate = ata_id_rotation_rate(args->id);
rbuf[1] = 0xb1;
rbuf[3] = 0x3c;
rbuf[4] = media_rotation_rate >> 8;
rbuf[5] = media_rotation_rate;
rbuf[7] = form_factor;
return 0;
}
/**
* ata_scsiop_noop - Command handler that simply returns success.
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* No operation. Simply returns success to caller, to indicate
* that the caller should successfully complete this SCSI command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
{
VPRINTK("ENTER\n");
return 0;
}
/**
* ata_msense_caching - Simulate MODE SENSE caching info page
* @id: device IDENTIFY data
* @buf: output buffer
*
* Generate a caching info page, which conditionally indicates
* write caching to the SCSI layer, depending on device
* capabilities.
*
* LOCKING:
* None.
*/
static unsigned int ata_msense_caching(u16 *id, u8 *buf)
{
memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
if (ata_id_wcache_enabled(id))
buf[2] |= (1 << 2); /* write cache enable */
if (!ata_id_rahead_enabled(id))
buf[12] |= (1 << 5); /* disable read ahead */
return sizeof(def_cache_mpage);
}
/**
* ata_msense_ctl_mode - Simulate MODE SENSE control mode page
* @buf: output buffer
*
* Generate a generic MODE SENSE control mode page.
*
* LOCKING:
* None.
*/
static unsigned int ata_msense_ctl_mode(u8 *buf)
{
memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
return sizeof(def_control_mpage);
}
/**
* ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
* @buf: output buffer
*
* Generate a generic MODE SENSE r/w error recovery page.
*
* LOCKING:
* None.
*/
static unsigned int ata_msense_rw_recovery(u8 *buf)
{
memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
return sizeof(def_rw_recovery_mpage);
}
/*
* We can turn this into a real blacklist if it's needed, for now just
* blacklist any Maxtor BANC1G10 revision firmware
*/
static int ata_dev_supports_fua(u16 *id)
{
unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
if (!libata_fua)
return 0;
if (!ata_id_has_fua(id))
return 0;
ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
if (strcmp(model, "Maxtor"))
return 1;
if (strcmp(fw, "BANC1G10"))
return 1;
return 0; /* blacklisted */
}
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate MODE SENSE commands. Assume this is invoked for direct
* access devices (e.g. disks) only. There should be no block
* descriptor for other device types.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
0,
0, 0x2, 0x0 /* block length: 512 bytes */
};
u8 pg, spg;
unsigned int ebd, page_control, six_byte;
u8 dpofua;
VPRINTK("ENTER\n");
six_byte = (scsicmd[0] == MODE_SENSE);
ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
/*
* LLBA bit in msense(10) ignored (compliant)
*/
page_control = scsicmd[2] >> 6;
switch (page_control) {
case 0: /* current */
break; /* supported */
case 3: /* saved */
goto saving_not_supp;
case 1: /* changeable */
case 2: /* defaults */
default:
goto invalid_fld;
}
if (six_byte)
p += 4 + (ebd ? 8 : 0);
else
p += 8 + (ebd ? 8 : 0);
pg = scsicmd[2] & 0x3f;
spg = scsicmd[3];
/*
* No mode subpages supported (yet) but asking for _all_
* subpages may be valid
*/
if (spg && (spg != ALL_SUB_MPAGES))
goto invalid_fld;
switch(pg) {
case RW_RECOVERY_MPAGE:
p += ata_msense_rw_recovery(p);
break;
case CACHE_MPAGE:
p += ata_msense_caching(args->id, p);
break;
case CONTROL_MPAGE:
p += ata_msense_ctl_mode(p);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p);
p += ata_msense_caching(args->id, p);
p += ata_msense_ctl_mode(p);
break;
default: /* invalid page code */
goto invalid_fld;
}
dpofua = 0;
if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
(!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
dpofua = 1 << 4;
if (six_byte) {
rbuf[0] = p - rbuf - 1;
rbuf[2] |= dpofua;
if (ebd) {
rbuf[3] = sizeof(sat_blk_desc);
memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
}
} else {
unsigned int output_len = p - rbuf - 2;
rbuf[0] = output_len >> 8;
rbuf[1] = output_len;
rbuf[3] |= dpofua;
if (ebd) {
rbuf[7] = sizeof(sat_blk_desc);
memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
}
}
return 0;
invalid_fld:
ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
/* "Invalid field in cbd" */
return 1;
saving_not_supp:
ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
/* "Saving parameters not supported" */
return 1;
}
/**
* ata_scsiop_read_cap - Simulate READ CAPACITY [16] commands
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate READ CAPACITY commands.
*
* LOCKING:
* None.
*/
static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
u8 log_per_phys = 0;
u16 lowest_aligned = 0;
u16 word_106 = dev->id[106];
u16 word_209 = dev->id[209];
if ((word_106 & 0xc000) == 0x4000) {
/* Number and offset of logical sectors per physical sector */
if (word_106 & (1 << 13))
log_per_phys = word_106 & 0xf;
if ((word_209 & 0xc000) == 0x4000) {
u16 first = dev->id[209] & 0x3fff;
if (first > 0)
lowest_aligned = (1 << log_per_phys) - first;
}
}
VPRINTK("ENTER\n");
if (args->cmd->cmnd[0] == READ_CAPACITY) {
if (last_lba >= 0xffffffffULL)
last_lba = 0xffffffff;
/* sector count, 32-bit */
rbuf[0] = last_lba >> (8 * 3);
rbuf[1] = last_lba >> (8 * 2);
rbuf[2] = last_lba >> (8 * 1);
rbuf[3] = last_lba;
/* sector size */
rbuf[6] = ATA_SECT_SIZE >> 8;
rbuf[7] = ATA_SECT_SIZE & 0xff;
} else {
/* sector count, 64-bit */
rbuf[0] = last_lba >> (8 * 7);
rbuf[1] = last_lba >> (8 * 6);
rbuf[2] = last_lba >> (8 * 5);
rbuf[3] = last_lba >> (8 * 4);
rbuf[4] = last_lba >> (8 * 3);
rbuf[5] = last_lba >> (8 * 2);
rbuf[6] = last_lba >> (8 * 1);
rbuf[7] = last_lba;
/* sector size */
rbuf[10] = ATA_SECT_SIZE >> 8;
rbuf[11] = ATA_SECT_SIZE & 0xff;
rbuf[12] = 0;
rbuf[13] = log_per_phys;
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id)) {
rbuf[14] |= 0x80; /* TPE */
if (ata_id_has_zero_after_trim(args->id))
rbuf[14] |= 0x40; /* TPRZ */
}
}
return 0;
}
/**
* ata_scsiop_report_luns - Simulate REPORT LUNS command
* @args: device IDENTIFY data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate REPORT LUNS command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
{
VPRINTK("ENTER\n");
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
return 0;
}
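/* Completion callback for the internally issued REQUEST SENSE command.
 * For failures other than device errors a pass-through sense is
 * generated, then the original SCSI command is completed and the qc
 * freed.
 */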
static void atapi_sense_complete(struct ata_queued_cmd *qc)
{
if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
/* FIXME: not quite right; we don't want the
* translation of taskfile registers into
* sense descriptors, since that's only
* correct for ATA, not ATAPI
*/
ata_gen_passthru_sense(qc);
}
qc->scsidone(qc->scsicmd);
ata_qc_free(qc);
}
/* is it pointless to prefer PIO for "safety reasons"? */
static inline int ata_pio_use_silly(struct ata_port *ap)
{
return (ap->flags & ATA_FLAG_PIO_DMA);
}
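/* Fetch sense data from an ATAPI device: reinitialize the queued command
 * as a REQUEST SENSE packet command that reads directly into
 * cmd->sense_buffer, using DMA when the port does PIO via DMA (see
 * ata_pio_use_silly()), PIO otherwise.
 */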
static void atapi_request_sense(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
DPRINTK("ATAPI request sense\n");
/* FIXME: is this needed? */
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
#ifdef CONFIG_ATA_SFF
if (ap->ops->sff_tf_read)
ap->ops->sff_tf_read(ap, &qc->tf);
#endif
/* fill these in, for the case where they are -not- overwritten */
cmd->sense_buffer[0] = 0x70;
cmd->sense_buffer[2] = qc->tf.feature >> 4;
ata_qc_reinit(qc);
/* setup sg table and init transfer direction */
sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
ata_sg_init(qc, &qc->sgent, 1);
qc->dma_dir = DMA_FROM_DEVICE;
memset(&qc->cdb, 0, qc->dev->cdb_len);
qc->cdb[0] = REQUEST_SENSE;
qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
qc->tf.command = ATA_CMD_PACKET;
if (ata_pio_use_silly(ap)) {
qc->tf.protocol = ATAPI_PROT_DMA;
qc->tf.feature |= ATAPI_PKT_DMA;
} else {
qc->tf.protocol = ATAPI_PROT_PIO;
qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
qc->tf.lbah = 0;
}
qc->nbytes = SCSI_SENSE_BUFFERSIZE;
qc->complete_fn = atapi_sense_complete;
ata_qc_issue(qc);
DPRINTK("EXIT\n");
}
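/* Completion handler for translated ATAPI commands: failed commands are
 * routed to the new EH or trigger autosense on the old path; on success
 * the INQUIRY response of devices reporting SCSI version 0 is fixed up
 * before the command is completed.
 */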
static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
unsigned int err_mask = qc->err_mask;
VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
/* handle completion from new EH */
if (unlikely(qc->ap->ops->error_handler &&
(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
/* FIXME: not quite right; we don't want the
* translation of taskfile registers into
* sense descriptors, since that's only
* correct for ATA, not ATAPI
*/
ata_gen_passthru_sense(qc);
}
/* SCSI EH automatically locks door if sdev->locked is
* set. Sometimes door lock request continues to
* fail, for example, when no media is present. This
* creates a loop - SCSI EH issues door lock which
* fails and gets invoked again to acquire sense data
* for the failed command.
*
* If door lock fails, always clear sdev->locked to
* avoid this infinite loop.
*
* This may happen before SCSI scan is complete. Make
* sure qc->dev->sdev isn't NULL before dereferencing.
*/
if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
qc->dev->sdev->locked = 0;
qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
qc->scsidone(cmd);
ata_qc_free(qc);
return;
}
/* successful completion or old EH failure path */
if (unlikely(err_mask & AC_ERR_DEV)) {
cmd->result = SAM_STAT_CHECK_CONDITION;
atapi_request_sense(qc);
return;
} else if (unlikely(err_mask)) {
/* FIXME: not quite right; we don't want the
* translation of taskfile registers into
* sense descriptors, since that's only
* correct for ATA, not ATAPI
*/
ata_gen_passthru_sense(qc);
} else {
u8 *scsicmd = cmd->cmnd;
if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
unsigned long flags;
u8 *buf;
buf = ata_scsi_rbuf_get(cmd, true, &flags);
/* ATAPI devices typically report zero for their SCSI version,
* and sometimes deviate from the spec WRT response data
* format. If SCSI version is reported as zero like normal,
* then we make the following fixups: 1) Fake MMC-5 version,
* to indicate to the Linux scsi midlayer this is a modern
* device. 2) Ensure response data format / ATAPI information
* are always correct.
*/
if (buf[2] == 0) {
buf[2] = 0x5;
buf[3] = 0x32;
}
ata_scsi_rbuf_put(cmd, true, &flags);
}
cmd->result = SAM_STAT_GOOD;
}
qc->scsidone(cmd);
ata_qc_free(qc);
}
/**
* atapi_xlat - Initialize PACKET taskfile
* @qc: command structure to be initialized
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on failure.
*/
static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
int nodata = (scmd->sc_data_direction == DMA_NONE);
int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
unsigned int nbytes;
memset(qc->cdb, 0, dev->cdb_len);
memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
qc->complete_fn = atapi_qc_complete;
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
qc->tf.flags |= ATA_TFLAG_WRITE;
DPRINTK("direction: write\n");
}
qc->tf.command = ATA_CMD_PACKET;
ata_qc_set_pc_nbytes(qc);
/* check whether ATAPI DMA is safe */
if (!nodata && !using_pio && atapi_check_dma(qc))
using_pio = 1;
/* Some controller variants snoop this value for Packet
* transfers to do state machine and FIFO management. Thus we
* want to set it properly, even for DMA where it is
* effectively meaningless.
*/
nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);
/* Most ATAPI devices which honor transfer chunk size don't
* behave according to the spec when an odd chunk size matching
* the transfer length is specified, i.e. when the number of
* bytes to transfer is 2n+1. According to the spec, what
* should happen is to indicate that 2n+1 is going to be
* transferred and transfer 2n+2 bytes where the last byte is
* padding.
*
* In practice, this doesn't happen. ATAPI devices first
* indicate and transfer 2n bytes and then indicate and
* transfer 2 bytes where the last byte is padding.
*
* This inconsistency confuses several controllers which
* perform PIO using DMA such as Intel AHCIs and sil3124/32.
* These controllers use the actual number of transferred bytes to
* update the DMA pointer, and a transfer of 4n+2 bytes makes those
* controllers push the DMA pointer by 4n+4 bytes because SATA data
* FISes are aligned to 4 bytes. This causes data corruption
* and buffer overrun.
*
* Always setting nbytes to an even number solves this problem
* because then ATAPI devices don't have to split data at 2n
* boundaries.
*/
if (nbytes & 0x1)
nbytes++;
qc->tf.lbam = (nbytes & 0xFF);
qc->tf.lbah = (nbytes >> 8);
if (nodata)
qc->tf.protocol = ATAPI_PROT_NODATA;
else if (using_pio)
qc->tf.protocol = ATAPI_PROT_PIO;
else {
/* DMA data xfer */
qc->tf.protocol = ATAPI_PROT_DMA;
qc->tf.feature |= ATAPI_PKT_DMA;
if ((dev->flags & ATA_DFLAG_DMADIR) &&
(scmd->sc_data_direction != DMA_TO_DEVICE))
/* some SATA bridges need us to indicate data xfer direction */
qc->tf.feature |= ATAPI_DMADIR;
}
/* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
as ATAPI tape drives don't get this right otherwise */
return 0;
}
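/* Map a simulated SCSI device number to an ata_device: the device number
 * on the host link when no port multiplier is attached, the PMP link
 * number otherwise.
 */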
static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
{
if (!sata_pmp_attached(ap)) {
if (likely(devno < ata_link_max_devices(&ap->link)))
return &ap->link.device[devno];
} else {
if (likely(devno < ap->nr_pmp_links))
return &ap->pmp_link[devno].device[0];
}
return NULL;
}
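/* Derive the ATA device addressed by a SCSI device: without a port
 * multiplier the SCSI id selects the device, with one attached the SCSI
 * channel selects the PMP link.
 */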
static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
const struct scsi_device *scsidev)
{
int devno;
/* skip commands not addressed to targets we simulate */
if (!sata_pmp_attached(ap)) {
if (unlikely(scsidev->channel || scsidev->lun))
return NULL;
devno = scsidev->id;
} else {
if (unlikely(scsidev->id || scsidev->lun))
return NULL;
devno = scsidev->channel;
}
return ata_find_dev(ap, devno);
}
/**
* ata_scsi_find_dev - lookup ata_device from scsi_cmnd
* @ap: ATA port to which the device is attached
* @scsidev: SCSI device from which we derive the ATA device
*
* Given various information provided in struct scsi_cmnd,
* map that onto an ATA bus, and using that mapping
* determine which ata_device is associated with the
* SCSI command to be sent.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Associated ATA device, or %NULL if not found.
*/
static struct ata_device *
ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
{
struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
if (unlikely(!dev || !ata_dev_enabled(dev)))
return NULL;
return dev;
}
/*
* ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
* @byte1: Byte 1 from pass-thru CDB.
*
* RETURNS:
* ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
*/
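/* Example: a pass-thru CDB with byte1 == 0x0c encodes protocol value 6
 * (DMA), which maps to ATA_PROT_DMA below.
 */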
static u8
ata_scsi_map_proto(u8 byte1)
{
switch((byte1 & 0x1e) >> 1) {
case 3: /* Non-data */
return ATA_PROT_NODATA;
case 6: /* DMA */
case 10: /* UDMA Data-in */
case 11: /* UDMA Data-Out */
return ATA_PROT_DMA;
case 4: /* PIO Data-in */
case 5: /* PIO Data-out */
return ATA_PROT_PIO;
case 0: /* Hard Reset */
case 1: /* SRST */
case 8: /* Device Diagnostic */
case 9: /* Device Reset */
case 7: /* DMA Queued */
case 12: /* FPDMA */
case 15: /* Return Response Info */
default: /* Reserved */
break;
}
return ATA_PROT_UNKNOWN;
}
/**
* ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
* @qc: command structure to be initialized
*
* Handles either 12 or 16-byte versions of the CDB.
*
* RETURNS:
* Zero on success, non-zero on failure.
*/
static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &(qc->tf);
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
goto invalid_fld;
/*
* 12 and 16 byte CDBs use different offsets to
* provide the various register values.
*/
if (cdb[0] == ATA_16) {
/*
* 16-byte CDB - may contain extended commands.
*
* If that is the case, copy the upper byte register values.
*/
if (cdb[1] & 0x01) {
tf->hob_feature = cdb[3];
tf->hob_nsect = cdb[5];
tf->hob_lbal = cdb[7];
tf->hob_lbam = cdb[9];
tf->hob_lbah = cdb[11];
tf->flags |= ATA_TFLAG_LBA48;
} else
tf->flags &= ~ATA_TFLAG_LBA48;
/*
* Always copy low byte, device and command registers.
*/
tf->feature = cdb[4];
tf->nsect = cdb[6];
tf->lbal = cdb[8];
tf->lbam = cdb[10];
tf->lbah = cdb[12];
tf->device = cdb[13];
tf->command = cdb[14];
} else {
/*
* 12-byte CDB - incapable of extended commands.
*/
tf->flags &= ~ATA_TFLAG_LBA48;
tf->feature = cdb[3];
tf->nsect = cdb[4];
tf->lbal = cdb[5];
tf->lbam = cdb[6];
tf->lbah = cdb[7];
tf->device = cdb[8];
tf->command = cdb[9];
}
/* enforce correct master/slave bit */
tf->device = dev->devno ?
tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
/* READ/WRITE LONG use a non-standard sect_size */
qc->sect_size = ATA_SECT_SIZE;
switch (tf->command) {
case ATA_CMD_READ_LONG:
case ATA_CMD_READ_LONG_ONCE:
case ATA_CMD_WRITE_LONG:
case ATA_CMD_WRITE_LONG_ONCE:
if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
goto invalid_fld;
qc->sect_size = scsi_bufflen(scmd);
}
/*
* Set flags so that all registers will be written, pass on
* write indication (used for PIO/DMA setup), result TF is
* copied back and we don't whine too much about its failure.
*/
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE)
tf->flags |= ATA_TFLAG_WRITE;
qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
/*
* Set transfer length.
*
* TODO: find out if we need to do more here to
* cover scatter/gather case.
*/
ata_qc_set_pc_nbytes(qc);
/* We may not issue DMA commands if no DMA mode is set */
if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
goto invalid_fld;
/* sanity check for pio multi commands */
if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
goto invalid_fld;
if (is_multi_taskfile(tf)) {
unsigned int multi_count = 1 << (cdb[1] >> 5);
/* compare the passed through multi_count
* with the cached multi_count of libata
*/
if (multi_count != dev->multi_count)
ata_dev_printk(dev, KERN_WARNING,
"invalid multi_count %u ignored\n",
multi_count);
}
/*
* Filter SET_FEATURES - XFER MODE command -- otherwise,
* SET_FEATURES - XFER MODE must be preceded/succeeded
* by an update to hardware-specific registers for each
* controller (i.e. the reason for ->set_piomode(),
* ->set_dmamode(), and ->post_set_mode() hooks).
*/
if (tf->command == ATA_CMD_SET_FEATURES &&
tf->feature == SETFEATURES_XFER)
goto invalid_fld;
/*
* Filter TPM commands by default. These provide an
* essentially uncontrolled encrypted "back door" between
* applications and the disk. Set libata.allow_tpm=1 if you
* have a real reason for wanting to use them. This ensures
* that installed software cannot easily mess stuff up without
* user intent. DVR type users will probably ship with this enabled
* for movie content management.
*
* Note that for ATA8 we can issue a DCS change and DCS freeze lock
* for this and should do in future but that it is not sufficient as
* DCS is an optional feature set. Thus we also do the software filter
* so that we comply with the TC consortium stated goal that the user
* can turn off TC features of their system.
*/
if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
goto invalid_fld;
return 0;
invalid_fld:
ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
/* "Invalid field in cdb" */
return 1;
}
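/* Translate WRITE SAME (16) with the unmap bit set into an ATA DSM/TRIM
 * command. The sector-sized payload buffer is rewritten in place as TRIM
 * LBA range entries before being handed to the device.
 */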
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
u32 size;
void *buf;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!dev->dma_mode))
goto invalid_fld;
if (unlikely(scmd->cmd_len < 16))
goto invalid_fld;
scsi_16_lba_len(cdb, &block, &n_block);
/* for now we only support WRITE SAME with the unmap bit set */
if (unlikely(!(cdb[1] & 0x8)))
goto invalid_fld;
/*
* WRITE SAME always has a sector sized buffer as payload, this
* should never be a multiple entry S/G list.
*/
if (!scsi_sg_count(scmd))
goto invalid_fld;
buf = page_address(sg_page(scsi_sglist(scmd)));
size = ata_set_lba_range_entries(buf, 512, block, n_block);
tf->protocol = ATA_PROT_DMA;
tf->hob_feature = 0;
tf->feature = ATA_DSM_TRIM;
tf->hob_nsect = (size / 512) >> 8;
tf->nsect = size / 512;
tf->command = ATA_CMD_DSM;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
ATA_TFLAG_WRITE;
ata_qc_set_pc_nbytes(qc);
return 0;
invalid_fld:
ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
/* "Invalid field in cdb" */
return 1;
}
/**
* ata_get_xlat_func - check if SCSI to ATA translation is possible
* @dev: ATA device
* @cmd: SCSI command opcode to consider
*
* Look up the SCSI command given, and determine whether the
* SCSI command is to be translated or simulated.
*
* RETURNS:
* Pointer to translation function if possible, %NULL if not.
*/
static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
switch (cmd) {
case READ_6:
case READ_10:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_16:
return ata_scsi_rw_xlat;
case WRITE_SAME_16:
return ata_scsi_write_same_xlat;
case SYNCHRONIZE_CACHE:
if (ata_try_flush_cache(dev))
return ata_scsi_flush_xlat;
break;
case VERIFY:
case VERIFY_16:
return ata_scsi_verify_xlat;
case ATA_12:
case ATA_16:
return ata_scsi_pass_thru;
case START_STOP:
return ata_scsi_start_stop_xlat;
}
return NULL;
}
/**
* ata_scsi_dump_cdb - dump SCSI command contents to dmesg
* @ap: ATA port to which the command was being sent
* @cmd: SCSI command to dump
*
* Prints the contents of a SCSI command via printk().
*/
static inline void ata_scsi_dump_cdb(struct ata_port *ap,
struct scsi_cmnd *cmd)
{
#ifdef ATA_DEBUG
struct scsi_device *scsidev = cmd->device;
u8 *scsicmd = cmd->cmnd;
DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
ap->print_id,
scsidev->channel, scsidev->id, scsidev->lun,
scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
scsicmd[8]);
#endif
}
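/* Core of queuecommand: validate the CDB length, pick a SCSI-to-ATA
 * translation function for the opcode (or relay the CDB directly to an
 * ATAPI device), and fall back to in-kernel simulation when no
 * translation exists.
 */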
static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
void (*done)(struct scsi_cmnd *),
struct ata_device *dev)
{
u8 scsi_op = scmd->cmnd[0];
ata_xlat_func_t xlat_func;
int rc = 0;
if (dev->class == ATA_DEV_ATA) {
if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
goto bad_cdb_len;
xlat_func = ata_get_xlat_func(dev, scsi_op);
} else {
if (unlikely(!scmd->cmd_len))
goto bad_cdb_len;
xlat_func = NULL;
if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
/* relay SCSI command to ATAPI device */
int len = COMMAND_SIZE(scsi_op);
if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
goto bad_cdb_len;
xlat_func = atapi_xlat;
} else {
/* ATA_16 passthru, treat as an ATA command */
if (unlikely(scmd->cmd_len > 16))
goto bad_cdb_len;
xlat_func = ata_get_xlat_func(dev, scsi_op);
}
}
if (xlat_func)
rc = ata_scsi_translate(dev, scmd, done, xlat_func);
else
ata_scsi_simulate(dev, scmd, done);
return rc;
bad_cdb_len:
DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
scmd->cmd_len, scsi_op, dev->cdb_len);
scmd->result = DID_ERROR << 16;
done(scmd);
return 0;
}
/**
* ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
* @cmd: SCSI command to be sent
* @done: Completion function, called when command is complete
*
* In some cases, this function translates SCSI commands into
* ATA taskfiles, and queues the taskfiles to be sent to
* hardware. In other cases, this function simulates a
* SCSI device by evaluating and responding to certain
* SCSI commands. This creates the overall effect of
* ATA and ATAPI devices appearing as SCSI devices.
*
* LOCKING:
* Releases scsi-layer-held lock, and obtains host lock.
*
* RETURNS:
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
* 0 otherwise.
*/
int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct ata_port *ap;
struct ata_device *dev;
struct scsi_device *scsidev = cmd->device;
struct Scsi_Host *shost = scsidev->host;
int rc = 0;
ap = ata_shost_to_port(shost);
spin_unlock(shost->host_lock);
spin_lock(ap->lock);
ata_scsi_dump_cdb(ap, cmd);
dev = ata_scsi_find_dev(ap, scsidev);
if (likely(dev))
rc = __ata_scsi_queuecmd(cmd, done, dev);
else {
cmd->result = (DID_BAD_TARGET << 16);
done(cmd);
}
spin_unlock(ap->lock);
spin_lock(shost->host_lock);
return rc;
}
/**
* ata_scsi_simulate - simulate SCSI command on ATA device
* @dev: the target device
* @cmd: SCSI command being sent to device.
* @done: SCSI command completion function.
*
* Interprets and directly executes a select list of SCSI commands
* that can be handled internally.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd;
u8 tmp8;
args.dev = dev;
args.id = dev->id;
args.cmd = cmd;
args.done = done;
switch(scsicmd[0]) {
/* TODO: worth improving? */
case FORMAT_UNIT:
ata_scsi_invalid_field(cmd, done);
break;
case INQUIRY:
if (scsicmd[1] & 2) /* is CmdDt set? */
ata_scsi_invalid_field(cmd, done);
else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
else switch (scsicmd[2]) {
case 0x00:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
break;
case 0x80:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
break;
case 0x83:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
break;
case 0x89:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
break;
case 0xb0:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
break;
case 0xb1:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
break;
default:
ata_scsi_invalid_field(cmd, done);
break;
}
break;
case MODE_SENSE:
case MODE_SENSE_10:
ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
break;
case MODE_SELECT: /* unconditionally return */
case MODE_SELECT_10: /* bad-field-in-cdb */
ata_scsi_invalid_field(cmd, done);
break;
case READ_CAPACITY:
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
break;
case SERVICE_ACTION_IN:
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
else
ata_scsi_invalid_field(cmd, done);
break;
case REPORT_LUNS:
ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
break;
case REQUEST_SENSE:
ata_scsi_set_sense(cmd, 0, 0, 0);
cmd->result = (DRIVER_SENSE << 24);
done(cmd);
break;
/* if we reach this, then writeback caching is disabled,
* turning this into a no-op.
*/
case SYNCHRONIZE_CACHE:
/* fall through */
/* no-op's, complete with success */
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
case TEST_UNIT_READY:
ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
break;
case SEND_DIAGNOSTIC:
tmp8 = scsicmd[1] & ~(1 << 3);
if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
else
ata_scsi_invalid_field(cmd, done);
break;
/* all other commands */
default:
ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
/* "Invalid command operation code" */
done(cmd);
break;
}
}
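/* Allocate and register a SCSI host for every port of @host, unwinding
 * any already-registered hosts on failure.
 */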
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
{
int i, rc;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct Scsi_Host *shost;
rc = -ENOMEM;
shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
if (!shost)
goto err_alloc;
*(struct ata_port **)&shost->hostdata[0] = ap;
ap->scsi_host = shost;
shost->transportt = &ata_scsi_transport_template;
shost->unique_id = ap->print_id;
shost->max_id = 16;
shost->max_lun = 1;
shost->max_channel = 1;
shost->max_cmd_len = 16;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
* Set host_blocked to 1 to prevent SCSI midlayer from
* automatically deferring requests.
*/
shost->max_host_blocked = 1;
rc = scsi_add_host(ap->scsi_host, ap->host->dev);
if (rc)
goto err_add;
}
return 0;
err_add:
scsi_host_put(host->ports[i]->scsi_host);
err_alloc:
while (--i >= 0) {
struct Scsi_Host *shost = host->ports[i]->scsi_host;
scsi_remove_host(shost);
scsi_host_put(shost);
}
return rc;
}
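/* Attach SCSI devices for all enabled ATA devices on @ap. If a
 * synchronous scan fails to attach everything, retry a few times before
 * falling back to rescheduling the hotplug task.
 */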
void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
int tries = 5;
struct ata_device *last_failed_dev = NULL;
struct ata_link *link;
struct ata_device *dev;
repeat:
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
struct scsi_device *sdev;
int channel = 0, id = 0;
if (dev->sdev)
continue;
if (ata_is_host_link(link))
id = dev->devno;
else
channel = link->pmp;
sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
NULL);
if (!IS_ERR(sdev)) {
dev->sdev = sdev;
scsi_device_put(sdev);
}
}
}
/* If we scanned while EH was in progress or allocation
* failure occurred, scan would have failed silently. Check
* whether all devices are attached.
*/
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
if (!dev->sdev)
goto exit_loop;
}
}
exit_loop:
if (!link)
return;
/* we're missing some SCSI devices */
if (sync) {
/* If caller requested synchronous scan && we've made
* any progress, sleep briefly and repeat.
*/
if (dev != last_failed_dev) {
msleep(100);
last_failed_dev = dev;
goto repeat;
}
/* We might be failing to detect boot device, give it
* a few more chances.
*/
if (--tries) {
msleep(100);
goto repeat;
}
ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
"failed without making any progress,\n"
" switching to async\n");
}
queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
round_jiffies_relative(HZ));
}
/**
* ata_scsi_offline_dev - offline attached SCSI device
* @dev: ATA device to offline attached SCSI device for
*
* This function is called from ata_eh_hotplug() and responsible
* for taking the SCSI device attached to @dev offline. This
* function is called with host lock which protects dev->sdev
* against clearing.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 1 if attached SCSI device exists, 0 otherwise.
*/
int ata_scsi_offline_dev(struct ata_device *dev)
{
if (dev->sdev) {
scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
return 1;
}
return 0;
}
/**
* ata_scsi_remove_dev - remove attached SCSI device
* @dev: ATA device to remove attached SCSI device for
*
* This function is called from ata_eh_scsi_hotplug() and
* responsible for removing the SCSI device attached to @dev.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
static void ata_scsi_remove_dev(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
struct scsi_device *sdev;
unsigned long flags;
/* Alas, we need to grab scan_mutex to ensure SCSI device
* state doesn't change underneath us and thus
* scsi_device_get() always succeeds. The mutex locking can
* be removed if there is __scsi_device_get() interface which
* increments reference counts regardless of device state.
*/
mutex_lock(&ap->scsi_host->scan_mutex);
spin_lock_irqsave(ap->lock, flags);
/* clearing dev->sdev is protected by host lock */
sdev = dev->sdev;
dev->sdev = NULL;
if (sdev) {
/* If user initiated unplug races with us, sdev can go
* away underneath us after the host lock and
* scan_mutex are released. Hold onto it.
*/
if (scsi_device_get(sdev) == 0) {
/* The following ensures the attached sdev is
* offline on return from ata_scsi_offline_dev()
* regardless of whether it wins or loses the race
* against this function.
*/
scsi_device_set_state(sdev, SDEV_OFFLINE);
} else {
WARN_ON(1);
sdev = NULL;
}
}
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_host->scan_mutex);
if (sdev) {
ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
dev_name(&sdev->sdev_gendev));
scsi_remove_device(sdev);
scsi_device_put(sdev);
}
}
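/* Remove the SCSI devices of all ATA devices on @link that are marked
 * ATA_DFLAG_DETACHED, clearing the flag under the port lock first.
 */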
static void ata_scsi_handle_link_detach(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
ata_for_each_dev(dev, link, ALL) {
unsigned long flags;
if (!(dev->flags & ATA_DFLAG_DETACHED))
continue;
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_DETACHED;
spin_unlock_irqrestore(ap->lock, flags);
ata_scsi_remove_dev(dev);
}
}
/**
* ata_scsi_media_change_notify - send media change event
* @dev: Pointer to the disk device with media change event
*
* Tell the block layer to send a media change notification
* event.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_scsi_media_change_notify(struct ata_device *dev)
{
if (dev->sdev)
sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
GFP_ATOMIC);
}
/**
* ata_scsi_hotplug - SCSI part of hotplug
* @work: Pointer to ATA port to perform SCSI hotplug on
*
* Perform SCSI part of hotplug. It's executed from a separate
* workqueue after EH completes. This is necessary because SCSI
* hot plugging requires working EH and hot unplugging is
* synchronized with hot plugging with a mutex.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_scsi_hotplug(struct work_struct *work)
{
struct ata_port *ap =
container_of(work, struct ata_port, hotplug_task.work);
int i;
if (ap->pflags & ATA_PFLAG_UNLOADING) {
DPRINTK("ENTER/EXIT - unloading\n");
return;
}
DPRINTK("ENTER\n");
/* Unplug detached devices. We cannot use link iterator here
* because PMP links have to be scanned even if PMP is
* currently not attached. Iterate manually.
*/
ata_scsi_handle_link_detach(&ap->link);
if (ap->pmp_link)
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
ata_scsi_handle_link_detach(&ap->pmp_link[i]);
/* scan for new ones */
ata_scsi_scan_host(ap, 0);
DPRINTK("EXIT\n");
}
/**
* ata_scsi_user_scan - indication for user-initiated bus scan
* @shost: SCSI host to scan
* @channel: Channel to scan
* @id: ID to scan
* @lun: LUN to scan
*
* This function is called when user explicitly requests bus
* scan. Set probe pending flag and invoke EH.
*
* LOCKING:
* SCSI layer (we don't care)
*
* RETURNS:
* Zero.
*/
static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, unsigned int lun)
{
struct ata_port *ap = ata_shost_to_port(shost);
unsigned long flags;
int devno, rc = 0;
if (!ap->ops->error_handler)
return -EOPNOTSUPP;
if (lun != SCAN_WILD_CARD && lun)
return -EINVAL;
if (!sata_pmp_attached(ap)) {
if (channel != SCAN_WILD_CARD && channel)
return -EINVAL;
devno = id;
} else {
if (id != SCAN_WILD_CARD && id)
return -EINVAL;
devno = channel;
}
spin_lock_irqsave(ap->lock, flags);
if (devno == SCAN_WILD_CARD) {
struct ata_link *link;
ata_for_each_link(link, ap, EDGE) {
struct ata_eh_info *ehi = &link->eh_info;
ehi->probe_mask |= ATA_ALL_DEVICES;
ehi->action |= ATA_EH_RESET;
}
} else {
struct ata_device *dev = ata_find_dev(ap, devno);
if (dev) {
struct ata_eh_info *ehi = &dev->link->eh_info;
ehi->probe_mask |= 1 << dev->devno;
ehi->action |= ATA_EH_RESET;
} else
rc = -EINVAL;
}
if (rc == 0) {
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
} else
spin_unlock_irqrestore(ap->lock, flags);
return rc;
}
/**
* ata_scsi_dev_rescan - initiate scsi_rescan_device()
* @work: Pointer to ATA port to perform scsi_rescan_device()
*
* After ATA pass thru (SAT) commands are executed successfully,
* libata needs to propagate the changes to the SCSI layer. This
* function must be executed from ata_aux_wq such that sdev
* attach/detach don't race with rescan.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_scsi_dev_rescan(struct work_struct *work)
{
struct ata_port *ap =
container_of(work, struct ata_port, scsi_rescan_task);
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
struct scsi_device *sdev = dev->sdev;
if (!sdev)
continue;
if (scsi_device_get(sdev))
continue;
spin_unlock_irqrestore(ap->lock, flags);
scsi_rescan_device(&(sdev->sdev_gendev));
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
}
}
spin_unlock_irqrestore(ap->lock, flags);
}
/**
* ata_sas_port_alloc - Allocate port for a SAS attached SATA device
* @host: ATA host container for all SAS ports
* @port_info: Information from low-level host driver
* @shost: SCSI host that the scsi device is attached to
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* ata_port pointer on success / NULL on failure.
*/
struct ata_port *ata_sas_port_alloc(struct ata_host *host,
struct ata_port_info *port_info,
struct Scsi_Host *shost)
{
struct ata_port *ap;
ap = ata_port_alloc(host);
if (!ap)
return NULL;
ap->port_no = 0;
ap->lock = shost->host_lock;
ap->pio_mask = port_info->pio_mask;
ap->mwdma_mask = port_info->mwdma_mask;
ap->udma_mask = port_info->udma_mask;
ap->flags |= port_info->flags;
ap->ops = port_info->port_ops;
ap->cbl = ATA_CBL_SATA;
return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
/**
* ata_sas_port_start - Set port up for dma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized.
*
* May be used as the port_start() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
int ata_sas_port_start(struct ata_port *ap)
{
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
/**
* ata_sas_port_stop - Undo ata_sas_port_start()
* @ap: Port to shut down
*
* May be used as the port_stop() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sas_port_stop(struct ata_port *ap)
{
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
/**
* ata_sas_port_init - Initialize a SATA device
* @ap: SATA port to initialize
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* Zero on success, non-zero on error.
*/
int ata_sas_port_init(struct ata_port *ap)
{
int rc = ap->ops->port_start(ap);
if (!rc) {
ap->print_id = ata_print_id++;
rc = ata_bus_probe(ap);
}
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);
/**
* ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
* @ap: SATA port to destroy
*
*/
void ata_sas_port_destroy(struct ata_port *ap)
{
if (ap->ops->port_stop)
ap->ops->port_stop(ap);
kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
/**
* ata_sas_slave_configure - Default slave_config routine for libata devices
* @sdev: SCSI device to configure
* @ap: ATA port to which SCSI device is attached
*
* RETURNS:
* Zero.
*/
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
ata_scsi_dev_config(sdev, ap->link.device);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
* @cmd: SCSI command to be sent
* @done: Completion function, called when command is complete
* @ap: ATA port to which the command is being sent
*
* RETURNS:
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
* 0 otherwise.
*/
int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
struct ata_port *ap)
{
int rc = 0;
ata_scsi_dump_cdb(ap, cmd);
if (likely(ata_dev_enabled(ap->link.device)))
rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
else {
cmd->result = (DID_BAD_TARGET << 16);
done(cmd);
}
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
/* gameplaySP
*
* Copyright (C) 2006 Exophase <exophase@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "common.h"
#include "font.h"
#ifdef PSP_BUILD
#include <pspctrl.h>
#include <pspkernel.h>
#include <pspdebug.h>
#include <pspdisplay.h>
#include <pspgu.h>
#include <psppower.h>
#include <psprtc.h>
static float *screen_vertex = (float *)0x441FC100;
static u32 *ge_cmd = (u32 *)0x441FC000;
static u16 *psp_gu_vram_base = (u16 *)(0x44000000);
static u32 *ge_cmd_ptr = (u32 *)0x441FC000;
static u32 gecbid;
static u32 video_direct = 0;
static u32 __attribute__((aligned(16))) display_list[32];
#define GBA_SCREEN_WIDTH 240
#define GBA_SCREEN_HEIGHT 160
#define PSP_SCREEN_WIDTH 480
#define PSP_SCREEN_HEIGHT 272
#define PSP_LINE_SIZE 512
#define PSP_ALL_BUTTON_MASK 0xFFFF
#define GE_CMD_FBP 0x9C
#define GE_CMD_FBW 0x9D
#define GE_CMD_TBP0 0xA0
#define GE_CMD_TBW0 0xA8
#define GE_CMD_TSIZE0 0xB8
#define GE_CMD_TFLUSH 0xCB
#define GE_CMD_CLEAR 0xD3
#define GE_CMD_VTYPE 0x12
#define GE_CMD_BASE 0x10
#define GE_CMD_VADDR 0x01
#define GE_CMD_IADDR 0x02
#define GE_CMD_PRIM 0x04
#define GE_CMD_FINISH 0x0F
#define GE_CMD_SIGNAL 0x0C
#define GE_CMD_NOP 0x00
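// Append one GE command word to the display list: the 8-bit opcode goes in
// the top byte and the 24-bit operand in the low bits.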
#define GE_CMD(cmd, operand) \
*ge_cmd_ptr = (((GE_CMD_##cmd) << 24) | (operand)); \
ge_cmd_ptr++ \
static u16 *screen_texture = (u16 *)(0x4000000 + (512 * 272 * 2));
static u16 *current_screen_texture = (u16 *)(0x4000000 + (512 * 272 * 2));
static u16 *screen_pixels = (u16 *)(0x4000000 + (512 * 272 * 2));
static u32 screen_pitch = 240;
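// GE list-finish callback; nothing to do here.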
static void Ge_Finish_Callback(int id, void *arg)
{
}
#define get_screen_pixels() \
screen_pixels \
#define get_screen_pitch() \
screen_pitch \
#else
SDL_Surface *screen;
const u32 video_scale = 1;
#define get_screen_pixels() \
((u16 *)screen->pixels) \
#define get_screen_pitch() \
(screen->pitch / 2) \
#endif
void render_scanline_conditional_tile(u32 start, u32 end, u16 *scanline,
u32 enable_flags, u32 dispcnt, u32 bldcnt, tile_layer_render_struct
*layer_renderers);
void render_scanline_conditional_bitmap(u32 start, u32 end, u16 *scanline,
u32 enable_flags, u32 dispcnt, u32 bldcnt, bitmap_layer_render_struct
*layer_renderers);
#define no_op \
// This old version is not necessary if the palette is either being converted
// transparently or the ABGR 1555 format is being used natively. The direct
// version (without conversion) is much faster.
#define tile_lookup_palette_full(palette, source) \
current_pixel = palette[source]; \
convert_palette(current_pixel) \
#define tile_lookup_palette(palette, source) \
current_pixel = palette[source]; \
#define tile_expand_base_normal(index) \
tile_lookup_palette(palette, current_pixel); \
dest_ptr[index] = current_pixel \
#define tile_expand_transparent_normal(index) \
tile_expand_base_normal(index) \
#define tile_expand_copy(index) \
dest_ptr[index] = copy_ptr[index] \
#define advance_dest_ptr_base(delta) \
dest_ptr += delta \
#define advance_dest_ptr_transparent(delta) \
advance_dest_ptr_base(delta) \
#define advance_dest_ptr_copy(delta) \
advance_dest_ptr_base(delta); \
copy_ptr += delta \
#define color_combine_mask_a(layer) \
((io_registers[REG_BLDCNT] >> layer) & 0x01) \
// For color blending operations, will create a mask that has in bit
// 10 if the layer is target B, and bit 9 if the layer is target A.
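// Example: if a layer is both a target A and a target B, the mask below
// evaluates to 0x600 (bits 9 and 10 set).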
#define color_combine_mask(layer) \
(color_combine_mask_a(layer) | \
((io_registers[REG_BLDCNT] >> (layer + 7)) & 0x02)) << 9 \
// For alpha blending renderers, draw the palette index (9bpp) and
// layer bits rather than the raw RGB. For the base this should write to
// the 32bit location directly.
#define tile_expand_base_alpha(index) \
dest_ptr[index] = current_pixel | pixel_combine \
#define tile_expand_base_bg(index) \
dest_ptr[index] = bg_combine \
// For layered (transparent) writes this should shift the "stack" and write
// to the bottom. This will preserve the topmost pixel and the most recent
// one.
#define tile_expand_transparent_alpha(index) \
dest_ptr[index] = (dest_ptr[index] << 16) | current_pixel | pixel_combine \
// OBJ should only shift if the top isn't already OBJ
#define tile_expand_transparent_alpha_obj(index) \
dest = dest_ptr[index]; \
if(dest & 0x00000100) \
{ \
dest_ptr[index] = (dest & 0xFFFF0000) | current_pixel | pixel_combine; \
} \
else \
{ \
dest_ptr[index] = (dest << 16) | current_pixel | pixel_combine; \
} \
// For color effects that don't need to preserve the previous layer.
// The color32 version should be used with 32bit wide dest_ptr so as to be
// compatible with alpha combine on top of it.
#define tile_expand_base_color16(index) \
dest_ptr[index] = current_pixel | pixel_combine \
#define tile_expand_transparent_color16(index) \
tile_expand_base_color16(index) \
#define tile_expand_base_color32(index) \
tile_expand_base_color16(index) \
#define tile_expand_transparent_color32(index) \
tile_expand_base_color16(index) \
// Operations for isolating 8bpp pixels within 32bpp pixel blocks.
#define tile_8bpp_pixel_op_mask(op_param) \
current_pixel = current_pixels & 0xFF \
#define tile_8bpp_pixel_op_shift_mask(shift) \
current_pixel = (current_pixels >> shift) & 0xFF \
#define tile_8bpp_pixel_op_shift(shift) \
current_pixel = current_pixels >> shift \
#define tile_8bpp_pixel_op_none(shift) \
// Base should always draw raw in 8bpp mode; color 0 will be drawn where
// color 0 is.
#define tile_8bpp_draw_base_normal(index) \
tile_expand_base_normal(index) \
#define tile_8bpp_draw_base_alpha(index) \
if(current_pixel) \
{ \
tile_expand_base_alpha(index); \
} \
else \
{ \
tile_expand_base_bg(index); \
} \
#define tile_8bpp_draw_base_color16(index) \
tile_8bpp_draw_base_alpha(index) \
#define tile_8bpp_draw_base_color32(index) \
tile_8bpp_draw_base_alpha(index) \
#define tile_8bpp_draw_base(index, op, op_param, alpha_op) \
tile_8bpp_pixel_op_##op(op_param); \
tile_8bpp_draw_base_##alpha_op(index) \
// Transparent (layered) writes should only replace what is there if the
// pixel is not transparent (zero)
#define tile_8bpp_draw_transparent(index, op, op_param, alpha_op) \
tile_8bpp_pixel_op_##op(op_param); \
if(current_pixel) \
{ \
tile_expand_transparent_##alpha_op(index); \
} \
#define tile_8bpp_draw_copy(index, op, op_param, alpha_op) \
tile_8bpp_pixel_op_##op(op_param); \
if(current_pixel) \
{ \
tile_expand_copy(index); \
} \
// Get the current tile from the map in 8bpp mode
#define get_tile_8bpp() \
current_tile = *map_ptr; \
tile_ptr = tile_base + ((current_tile & 0x3FF) * 64) \
// Draw half of a tile in 8bpp mode, for base renderer
#define tile_8bpp_draw_four_noflip(index, combine_op, alpha_op) \
tile_8bpp_draw_##combine_op(index + 0, mask, 0, alpha_op); \
tile_8bpp_draw_##combine_op(index + 1, shift_mask, 8, alpha_op); \
tile_8bpp_draw_##combine_op(index + 2, shift_mask, 16, alpha_op); \
tile_8bpp_draw_##combine_op(index + 3, shift, 24, alpha_op) \
// Like the above, but draws the half-tile horizontally flipped
#define tile_8bpp_draw_four_flip(index, combine_op, alpha_op) \
tile_8bpp_draw_##combine_op(index + 3, mask, 0, alpha_op); \
tile_8bpp_draw_##combine_op(index + 2, shift_mask, 8, alpha_op); \
tile_8bpp_draw_##combine_op(index + 1, shift_mask, 16, alpha_op); \
tile_8bpp_draw_##combine_op(index + 0, shift, 24, alpha_op) \
#define tile_8bpp_draw_four_base(index, alpha_op, flip_op) \
tile_8bpp_draw_four_##flip_op(index, base, alpha_op) \
// Draw half of a tile in 8bpp mode, for transparent renderer; as an
// optimization the entire thing is checked against zero (in transparent
// capable renderers it is more likely for the pixels to be transparent than
// opaque)
#define tile_8bpp_draw_four_transparent(index, alpha_op, flip_op) \
if(current_pixels != 0) \
{ \
tile_8bpp_draw_four_##flip_op(index, transparent, alpha_op); \
} \
#define tile_8bpp_draw_four_copy(index, alpha_op, flip_op) \
if(current_pixels != 0) \
{ \
tile_8bpp_draw_four_##flip_op(index, copy, alpha_op); \
} \
// Helper macro for drawing 8bpp tiles clipped against the edge of the screen
#define partial_tile_8bpp(combine_op, alpha_op) \
for(i = 0; i < partial_tile_run; i++) \
{ \
tile_8bpp_draw_##combine_op(0, mask, 0, alpha_op); \
current_pixels >>= 8; \
advance_dest_ptr_##combine_op(1); \
} \
// Draws 8bpp tiles clipped against the left side of the screen,
// partial_tile_offset indicates how far in it is clipped, partial_tile_run
// indicates how much it should draw.
#define partial_tile_right_noflip_8bpp(combine_op, alpha_op) \
if(partial_tile_offset >= 4) \
{ \
current_pixels = *((u32 *)(tile_ptr + 4)) >> \
((partial_tile_offset - 4) * 8); \
partial_tile_8bpp(combine_op, alpha_op); \
} \
else \
{ \
partial_tile_run -= 4; \
current_pixels = *((u32 *)tile_ptr) >> (partial_tile_offset * 8); \
partial_tile_8bpp(combine_op, alpha_op); \
current_pixels = *((u32 *)(tile_ptr + 4)); \
tile_8bpp_draw_four_##combine_op(0, alpha_op, noflip); \
advance_dest_ptr_##combine_op(4); \
} \
// Draws 8bpp tiles clipped against both the left and right side of the
// screen, IE, runs of less than 8 - partial_tile_offset.
#define partial_tile_mid_noflip_8bpp(combine_op, alpha_op) \
if(partial_tile_offset >= 4) \
{ \
current_pixels = *((u32 *)(tile_ptr + 4)) >> \
((partial_tile_offset - 4) * 8); \
partial_tile_8bpp(combine_op, alpha_op); \
} \
else \
{ \
current_pixels = *((u32 *)tile_ptr) >> (partial_tile_offset * 8); \
if((partial_tile_offset + partial_tile_run) > 4) \
{ \
u32 old_run = partial_tile_run; \
partial_tile_run = 4 - partial_tile_offset; \
partial_tile_8bpp(combine_op, alpha_op); \
partial_tile_run = old_run - partial_tile_run; \
current_pixels = *((u32 *)(tile_ptr + 4)); \
partial_tile_8bpp(combine_op, alpha_op); \
} \
else \
{ \
partial_tile_8bpp(combine_op, alpha_op); \
} \
} \
// Draws 8bpp tiles clipped against the right side of the screen,
// partial_tile_run indicates how much there is to draw.
#define partial_tile_left_noflip_8bpp(combine_op, alpha_op) \
if(partial_tile_run >= 4) \
{ \
current_pixels = *((u32 *)tile_ptr); \
tile_8bpp_draw_four_##combine_op(0, alpha_op, noflip); \
advance_dest_ptr_##combine_op(4); \
tile_ptr += 4; \
partial_tile_run -= 4; \
} \
\
current_pixels = *((u32 *)(tile_ptr)); \
partial_tile_8bpp(combine_op, alpha_op) \
// Draws a non-clipped (complete) 8bpp tile.
#define tile_noflip_8bpp(combine_op, alpha_op) \
current_pixels = *((u32 *)tile_ptr); \
tile_8bpp_draw_four_##combine_op(0, alpha_op, noflip); \
current_pixels = *((u32 *)(tile_ptr + 4)); \
tile_8bpp_draw_four_##combine_op(4, alpha_op, noflip) \
// Like the above versions but draws flipped tiles.
#define partial_tile_flip_8bpp(combine_op, alpha_op) \
for(i = 0; i < partial_tile_run; i++) \
{ \
tile_8bpp_draw_##combine_op(0, shift, 24, alpha_op); \
current_pixels <<= 8; \
advance_dest_ptr_##combine_op(1); \
} \
#define partial_tile_right_flip_8bpp(combine_op, alpha_op) \
if(partial_tile_offset >= 4) \
{ \
current_pixels = *((u32 *)tile_ptr) << ((partial_tile_offset - 4) * 8); \
partial_tile_flip_8bpp(combine_op, alpha_op); \
} \
else \
{ \
partial_tile_run -= 4; \
current_pixels = *((u32 *)(tile_ptr + 4)) << \
((partial_tile_offset - 4) * 8); \
partial_tile_flip_8bpp(combine_op, alpha_op); \
current_pixels = *((u32 *)tile_ptr); \
tile_8bpp_draw_four_##combine_op(0, alpha_op, flip); \
advance_dest_ptr_##combine_op(4); \
} \
#define partial_tile_mid_flip_8bpp(combine_op, alpha_op) \
if(partial_tile_offset >= 4) \
{ \
current_pixels = *((u32 *)tile_ptr) << ((partial_tile_offset - 4) * 8); \
partial_tile_flip_8bpp(combine_op, alpha_op); \
} \
else \
{ \
current_pixels = *((u32 *)(tile_ptr + 4)) << \
((partial_tile_offset - 4) * 8); \
\
if((partial_tile_offset + partial_tile_run) > 4) \
{ \
u32 old_run = partial_tile_run; \
partial_tile_run = 4 - partial_tile_offset; \
partial_tile_flip_8bpp(combine_op, alpha_op); \
partial_tile_run = old_run - partial_tile_run; \
current_pixels = *((u32 *)(tile_ptr)); \
partial_tile_flip_8bpp(combine_op, alpha_op); \
} \
else \
{ \
partial_tile_flip_8bpp(combine_op, alpha_op); \
} \
} \
#define partial_tile_left_flip_8bpp(combine_op, alpha_op) \
if(partial_tile_run >= 4) \
{ \
current_pixels = *((u32 *)(tile_ptr + 4)); \
tile_8bpp_draw_four_##combine_op(0, alpha_op, flip); \
advance_dest_ptr_##combine_op(4); \
tile_ptr -= 4; \
partial_tile_run -= 4; \
} \
\
current_pixels = *((u32 *)(tile_ptr + 4)); \
partial_tile_flip_8bpp(combine_op, alpha_op) \
#define tile_flip_8bpp(combine_op, alpha_op) \
current_pixels = *((u32 *)(tile_ptr + 4)); \
tile_8bpp_draw_four_##combine_op(0, alpha_op, flip); \
current_pixels = *((u32 *)tile_ptr); \
tile_8bpp_draw_four_##combine_op(4, alpha_op, flip) \
// Operations for isolating 4bpp pixels within a 32bit block
#define tile_4bpp_pixel_op_mask(op_param) \
current_pixel = current_pixels & 0x0F \
#define tile_4bpp_pixel_op_shift_mask(shift) \
current_pixel = (current_pixels >> shift) & 0x0F \
#define tile_4bpp_pixel_op_shift(shift) \
current_pixel = current_pixels >> shift \
#define tile_4bpp_pixel_op_none(op_param) \
// Draws a single 4bpp pixel as base, normal renderer; checks to see if the
// pixel is zero because if so the current palette should not be applied.
// These ifs could be replaced with a lookup table; whether that would be
// faster should be benchmarked. The lookup table would be a 0-255
// identity map, except that multiples of 16 would map to 0.
#define tile_4bpp_draw_base_normal(index) \
if(current_pixel) \
{ \
current_pixel |= current_palette; \
tile_expand_base_normal(index); \
} \
else \
{ \
tile_expand_base_normal(index); \
} \
#define tile_4bpp_draw_base_alpha(index) \
if(current_pixel) \
{ \
current_pixel |= current_palette; \
tile_expand_base_alpha(index); \
} \
else \
{ \
tile_expand_base_bg(index); \
} \
#define tile_4bpp_draw_base_color16(index) \
tile_4bpp_draw_base_alpha(index) \
#define tile_4bpp_draw_base_color32(index) \
tile_4bpp_draw_base_alpha(index) \
#define tile_4bpp_draw_base(index, op, op_param, alpha_op) \
tile_4bpp_pixel_op_##op(op_param); \
tile_4bpp_draw_base_##alpha_op(index) \
// Draws a single 4bpp pixel as layered, if not transparent.
#define tile_4bpp_draw_transparent(index, op, op_param, alpha_op) \
tile_4bpp_pixel_op_##op(op_param); \
if(current_pixel) \
{ \
current_pixel |= current_palette; \
tile_expand_transparent_##alpha_op(index); \
} \
#define tile_4bpp_draw_copy(index, op, op_param, alpha_op) \
tile_4bpp_pixel_op_##op(op_param); \
if(current_pixel) \
{ \
current_pixel |= current_palette; \
tile_expand_copy(index); \
} \
// Draws eight background pixels in transparent mode, for alpha or normal
// renderers.
#define tile_4bpp_draw_eight_base_zero(value) \
dest_ptr[0] = value; \
dest_ptr[1] = value; \
dest_ptr[2] = value; \
dest_ptr[3] = value; \
dest_ptr[4] = value; \
dest_ptr[5] = value; \
dest_ptr[6] = value; \
dest_ptr[7] = value \
// Draws eight background pixels for the alpha renderer, basically color zero
// with the background flag high.
#define tile_4bpp_draw_eight_base_zero_alpha() \
tile_4bpp_draw_eight_base_zero(bg_combine) \
#define tile_4bpp_draw_eight_base_zero_color16() \
tile_4bpp_draw_eight_base_zero_alpha() \
#define tile_4bpp_draw_eight_base_zero_color32() \
tile_4bpp_draw_eight_base_zero_alpha() \
// Draws eight background pixels for the normal renderer, just a bunch of
// zeros.
#define tile_4bpp_draw_eight_base_zero_normal() \
current_pixel = palette[0]; \
tile_4bpp_draw_eight_base_zero(current_pixel) \
// Draws eight 4bpp pixels.
#define tile_4bpp_draw_eight_noflip(combine_op, alpha_op) \
tile_4bpp_draw_##combine_op(0, mask, 0, alpha_op); \
tile_4bpp_draw_##combine_op(1, shift_mask, 4, alpha_op); \
tile_4bpp_draw_##combine_op(2, shift_mask, 8, alpha_op); \
tile_4bpp_draw_##combine_op(3, shift_mask, 12, alpha_op); \
tile_4bpp_draw_##combine_op(4, shift_mask, 16, alpha_op); \
tile_4bpp_draw_##combine_op(5, shift_mask, 20, alpha_op); \
tile_4bpp_draw_##combine_op(6, shift_mask, 24, alpha_op); \
tile_4bpp_draw_##combine_op(7, shift, 28, alpha_op) \
// Draws eight 4bpp pixels in reverse order (for hflip).
#define tile_4bpp_draw_eight_flip(combine_op, alpha_op) \
tile_4bpp_draw_##combine_op(7, mask, 0, alpha_op); \
tile_4bpp_draw_##combine_op(6, shift_mask, 4, alpha_op); \
tile_4bpp_draw_##combine_op(5, shift_mask, 8, alpha_op); \
tile_4bpp_draw_##combine_op(4, shift_mask, 12, alpha_op); \
tile_4bpp_draw_##combine_op(3, shift_mask, 16, alpha_op); \
tile_4bpp_draw_##combine_op(2, shift_mask, 20, alpha_op); \
tile_4bpp_draw_##combine_op(1, shift_mask, 24, alpha_op); \
tile_4bpp_draw_##combine_op(0, shift, 28, alpha_op) \
// Draws eight 4bpp pixels in base mode, checks if all are zero, if so draws
// the appropriate background pixels.
#define tile_4bpp_draw_eight_base(alpha_op, flip_op) \
if(current_pixels != 0) \
{ \
tile_4bpp_draw_eight_##flip_op(base, alpha_op); \
} \
else \
{ \
tile_4bpp_draw_eight_base_zero_##alpha_op(); \
} \
// Draws eight 4bpp pixels in transparent (layered) mode, checks if all are
// zero and if so draws nothing.
#define tile_4bpp_draw_eight_transparent(alpha_op, flip_op) \
if(current_pixels != 0) \
{ \
tile_4bpp_draw_eight_##flip_op(transparent, alpha_op); \
} \
#define tile_4bpp_draw_eight_copy(alpha_op, flip_op) \
if(current_pixels != 0) \
{ \
tile_4bpp_draw_eight_##flip_op(copy, alpha_op); \
} \
// Gets the current tile in 4bpp mode, also getting the current palette and
// the pixel block.
#define get_tile_4bpp() \
current_tile = *map_ptr; \
current_palette = (current_tile >> 12) << 4; \
tile_ptr = tile_base + ((current_tile & 0x3FF) * 32); \
// Helper macro for drawing clipped 4bpp tiles.
#define partial_tile_4bpp(combine_op, alpha_op) \
for(i = 0; i < partial_tile_run; i++) \
{ \
tile_4bpp_draw_##combine_op(0, mask, 0, alpha_op); \
current_pixels >>= 4; \
advance_dest_ptr_##combine_op(1); \
} \
// Draws a 4bpp tile clipped against the left edge of the screen.
// partial_tile_offset is how far in it's clipped, partial_tile_run is
// how many to draw.
#define partial_tile_right_noflip_4bpp(combine_op, alpha_op) \
current_pixels = *((u32 *)tile_ptr) >> (partial_tile_offset * 4); \
partial_tile_4bpp(combine_op, alpha_op) \
// Draws a 4bpp tile clipped against both edges of the screen, same as right.
#define partial_tile_mid_noflip_4bpp(combine_op, alpha_op) \
partial_tile_right_noflip_4bpp(combine_op, alpha_op) \
// Draws a 4bpp tile clipped against the right edge of the screen.
// partial_tile_offset is how many to draw.
#define partial_tile_left_noflip_4bpp(combine_op, alpha_op) \
current_pixels = *((u32 *)tile_ptr); \
partial_tile_4bpp(combine_op, alpha_op) \
// Draws a complete 4bpp tile row (not clipped)
#define tile_noflip_4bpp(combine_op, alpha_op) \
current_pixels = *((u32 *)tile_ptr); \
tile_4bpp_draw_eight_##combine_op(alpha_op, noflip) \
// Like the above, but draws flipped tiles.
#define partial_tile_flip_4bpp(combine_op, alpha_op) \
for(i = 0; i < partial_tile_run; i++) \
{ \
tile_4bpp_draw_##combine_op(0, shift, 28, alpha_op); \
current_pixels <<= 4; \
advance_dest_ptr_##combine_op(1); \
} \
#define partial_tile_right_flip_4bpp(combine_op, alpha_op) \
current_pixels = *((u32 *)tile_ptr) << (partial_tile_offset * 4); \
partial_tile_flip_4bpp(combine_op, alpha_op) \
#define partial_tile_mid_flip_4bpp(combine_op, alpha_op) \
partial_tile_right_flip_4bpp(combine_op, alpha_op) \
#define partial_tile_left_flip_4bpp(combine_op, alpha_op) \
current_pixels = *((u32 *)tile_ptr); \
partial_tile_flip_4bpp(combine_op, alpha_op) \
#define tile_flip_4bpp(combine_op, alpha_op) \
current_pixels = *((u32 *)tile_ptr); \
tile_4bpp_draw_eight_##combine_op(alpha_op, flip) \
// Draws a single (partial or complete) tile from the tilemap, flipping
// as necessary.
#define single_tile_map(tile_type, combine_op, color_depth, alpha_op) \
get_tile_##color_depth(); \
if(current_tile & 0x800) \
tile_ptr += vertical_pixel_flip; \
\
if(current_tile & 0x400) \
{ \
tile_type##_flip_##color_depth(combine_op, alpha_op); \
} \
else \
{ \
tile_type##_noflip_##color_depth(combine_op, alpha_op); \
} \
// Draws multiple sequential tiles from the tilemap, hflips and vflips as
// necessary.
#define multiple_tile_map(combine_op, color_depth, alpha_op) \
for(i = 0; i < tile_run; i++) \
{ \
single_tile_map(tile, combine_op, color_depth, alpha_op); \
advance_dest_ptr_##combine_op(8); \
map_ptr++; \
} \
// Draws a partial tile from a tilemap clipped against the left edge of the
// screen.
#define partial_tile_right_map(combine_op, color_depth, alpha_op) \
single_tile_map(partial_tile_right, combine_op, color_depth, alpha_op); \
map_ptr++ \
// Draws a partial tile from a tilemap clipped against both edges of the
// screen.
#define partial_tile_mid_map(combine_op, color_depth, alpha_op) \
single_tile_map(partial_tile_mid, combine_op, color_depth, alpha_op) \
// Draws a partial tile from a tilemap clipped against the right edge of the
// screen.
#define partial_tile_left_map(combine_op, color_depth, alpha_op) \
single_tile_map(partial_tile_left, combine_op, color_depth, alpha_op) \
// Advances a non-flipped 4bpp obj to the next tile.
#define obj_advance_noflip_4bpp() \
tile_ptr += 32 \
// Advances a non-flipped 8bpp obj to the next tile.
#define obj_advance_noflip_8bpp() \
tile_ptr += 64 \
// Advances a flipped 4bpp obj to the next tile.
#define obj_advance_flip_4bpp() \
tile_ptr -= 32 \
// Advances a flipped 8bpp obj to the next tile.
#define obj_advance_flip_8bpp() \
tile_ptr -= 64 \
// Draws multiple sequential tiles from an obj, flip_op determines if it should
// be flipped or not (set to flip or noflip)
#define multiple_tile_obj(combine_op, color_depth, alpha_op, flip_op) \
for(i = 0; i < tile_run; i++) \
{ \
tile_##flip_op##_##color_depth(combine_op, alpha_op); \
obj_advance_##flip_op##_##color_depth(); \
advance_dest_ptr_##combine_op(8); \
} \
// Draws an obj's tile clipped against the left side of the screen
#define partial_tile_right_obj(combine_op, color_depth, alpha_op, flip_op) \
partial_tile_right_##flip_op##_##color_depth(combine_op, alpha_op); \
obj_advance_##flip_op##_##color_depth() \
// Draws an obj's tile clipped against both sides of the screen
#define partial_tile_mid_obj(combine_op, color_depth, alpha_op, flip_op) \
partial_tile_mid_##flip_op##_##color_depth(combine_op, alpha_op) \
// Draws an obj's tile clipped against the right side of the screen
#define partial_tile_left_obj(combine_op, color_depth, alpha_op, flip_op) \
partial_tile_left_##flip_op##_##color_depth(combine_op, alpha_op) \
// Extra variables specific to the 8bpp/4bpp tile renderers.
#define tile_extra_variables_8bpp() \
#define tile_extra_variables_4bpp() \
u32 current_palette \
// Byte lengths of complete tiles and tile rows in 4bpp and 8bpp.
#define tile_width_4bpp 4
#define tile_size_4bpp 32
#define tile_width_8bpp 8
#define tile_size_8bpp 64
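// An 8x8 4bpp tile packs two pixels per byte: 4 bytes per row, 32 bytes per
// tile. An 8bpp tile uses one byte per pixel: 8 bytes per row, 64 bytes per
// tile.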
// Render a single scanline of text tiles
#define tile_render(color_depth, combine_op, alpha_op) \
{ \
u32 vertical_pixel_offset = (vertical_offset % 8) * \
tile_width_##color_depth; \
u32 vertical_pixel_flip = \
((tile_size_##color_depth - tile_width_##color_depth) - \
vertical_pixel_offset) - vertical_pixel_offset; \
tile_extra_variables_##color_depth(); \
u8 *tile_base = vram + (((bg_control >> 2) & 0x03) * (1024 * 16)) + \
vertical_pixel_offset; \
u32 pixel_run = 256 - (horizontal_offset % 256); \
u32 current_tile; \
\
map_base += ((vertical_offset % 256) / 8) * 32; \
partial_tile_offset = (horizontal_offset % 8); \
\
if(pixel_run >= end) \
{ \
if(partial_tile_offset) \
{ \
partial_tile_run = 8 - partial_tile_offset; \
if(end < partial_tile_run) \
{ \
partial_tile_run = end; \
partial_tile_mid_map(combine_op, color_depth, alpha_op); \
return; \
} \
else \
{ \
end -= partial_tile_run; \
partial_tile_right_map(combine_op, color_depth, alpha_op); \
} \
} \
\
tile_run = end / 8; \
multiple_tile_map(combine_op, color_depth, alpha_op); \
\
partial_tile_run = end % 8; \
\
if(partial_tile_run) \
{ \
partial_tile_left_map(combine_op, color_depth, alpha_op); \
} \
} \
else \
{ \
if(partial_tile_offset) \
{ \
partial_tile_run = 8 - partial_tile_offset; \
partial_tile_right_map(combine_op, color_depth, alpha_op); \
} \
\
tile_run = (pixel_run - partial_tile_run) / 8; \
multiple_tile_map(combine_op, color_depth, alpha_op); \
map_ptr = second_ptr; \
end -= pixel_run; \
tile_run = end / 8; \
multiple_tile_map(combine_op, color_depth, alpha_op); \
\
partial_tile_run = end % 8; \
if(partial_tile_run) \
{ \
partial_tile_left_map(combine_op, color_depth, alpha_op); \
} \
} \
} \
#define render_scanline_dest_normal u16
#define render_scanline_dest_alpha u32
#define render_scanline_dest_alpha_obj u32
#define render_scanline_dest_color16 u16
#define render_scanline_dest_color32 u32
#define render_scanline_dest_partial_alpha u32
#define render_scanline_dest_copy_tile u16
#define render_scanline_dest_copy_bitmap u16
// If the scanline being rendered is not a blend target A there's no point in
// keeping what's underneath it, because it can never blend with it.
#define render_scanline_skip_alpha(bg_type, combine_op) \
if((pixel_combine & 0x00000200) == 0) \
{ \
render_scanline_##bg_type##_##combine_op##_color32(layer, \
start, end, scanline); \
return; \
} \
#define render_scanline_extra_variables_base_normal(bg_type) \
#define render_scanline_extra_variables_transparent_normal(bg_type) \
#define render_scanline_extra_variables_base_alpha(bg_type) \
u32 bg_combine = color_combine_mask(5); \
u32 pixel_combine = color_combine_mask(layer) | (bg_combine << 16); \
render_scanline_skip_alpha(bg_type, base) \
#define render_scanline_extra_variables_base_color() \
u32 bg_combine = color_combine_mask(5); \
u32 pixel_combine = color_combine_mask(layer) \
#define render_scanline_extra_variables_transparent_alpha(bg_type) \
u32 pixel_combine = color_combine_mask(layer); \
render_scanline_skip_alpha(bg_type, transparent) \
#define render_scanline_extra_variables_transparent_color() \
u32 pixel_combine = color_combine_mask(layer) \
#define render_scanline_extra_variables_base_color16(bg_type) \
render_scanline_extra_variables_base_color() \
#define render_scanline_extra_variables_transparent_color16(bg_type) \
render_scanline_extra_variables_transparent_color() \
#define render_scanline_extra_variables_base_color32(bg_type) \
render_scanline_extra_variables_base_color() \
#define render_scanline_extra_variables_transparent_color32(bg_type) \
render_scanline_extra_variables_transparent_color() \
// Map widths and heights
u32 map_widths[] = { 256, 512, 256, 512 };
u32 map_heights[] = { 256, 256, 512, 512 };
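// The (bg_control >> 14) size field of a text background selects one of
// these four screen sizes in pixels. Each 256x256 block is a 32x32 entry
// screen block (2KB of 16-bit map entries), which is why the code below
// steps map_base in units of 32 entries per tile row and 32 * 32 entries
// per block.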
// Build text scanline rendering functions.
#define render_scanline_text_builder(combine_op, alpha_op) \
void render_scanline_text_##combine_op##_##alpha_op(u32 layer, \
u32 start, u32 end, void *scanline) \
{ \
render_scanline_extra_variables_##combine_op##_##alpha_op(text); \
u32 bg_control = io_registers[REG_BG0CNT + layer]; \
u32 map_size = (bg_control >> 14) & 0x03; \
u32 map_width = map_widths[map_size]; \
u32 map_height = map_heights[map_size]; \
u32 horizontal_offset = \
(io_registers[REG_BG0HOFS + (layer * 2)] + start) % 512; \
u32 vertical_offset = (io_registers[REG_VCOUNT] + \
io_registers[REG_BG0VOFS + (layer * 2)]) % 512; \
u32 current_pixel; \
u32 current_pixels; \
u32 partial_tile_run = 0; \
u32 partial_tile_offset; \
u32 tile_run; \
u16 *palette = palette_ram_converted; \
u32 i; \
render_scanline_dest_##alpha_op *dest_ptr = \
((render_scanline_dest_##alpha_op *)scanline) + start; \
\
u16 *map_base = (u16 *)(vram + ((bg_control >> 8) & 0x1F) * (1024 * 2)); \
u16 *map_ptr, *second_ptr; \
u8 *tile_ptr; \
\
end -= start; \
\
if((map_size & 0x02) && (vertical_offset >= 256)) \
{ \
map_base += ((map_width / 8) * 32) + \
(((vertical_offset - 256) / 8) * 32); \
} \
else \
{ \
map_base += (((vertical_offset % 256) / 8) * 32); \
} \
\
if(map_size & 0x01) \
{ \
if(horizontal_offset >= 256) \
{ \
horizontal_offset -= 256; \
map_ptr = map_base + (32 * 32) + (horizontal_offset / 8); \
second_ptr = map_base; \
} \
else \
{ \
map_ptr = map_base + (horizontal_offset / 8); \
second_ptr = map_base + (32 * 32); \
} \
} \
else \
{ \
horizontal_offset %= 256; \
map_ptr = map_base + (horizontal_offset / 8); \
second_ptr = map_base; \
} \
\
if(bg_control & 0x80) \
{ \
tile_render(8bpp, combine_op, alpha_op); \
} \
else \
{ \
tile_render(4bpp, combine_op, alpha_op); \
} \
} \
render_scanline_text_builder(base, normal);
render_scanline_text_builder(transparent, normal);
render_scanline_text_builder(base, color16);
render_scanline_text_builder(transparent, color16);
render_scanline_text_builder(base, color32);
render_scanline_text_builder(transparent, color32);
render_scanline_text_builder(base, alpha);
render_scanline_text_builder(transparent, alpha);
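// The builder above generates one renderer per (combine_op, alpha_op) pair,
// e.g. render_scanline_text_base_normal() writes final 16-bit colors, while
// the color16/color32/alpha variants leave palette indices plus effect flag
// bits in an intermediate scanline for the expand_* color effect passes
// further down. "base" draws every pixel including the backdrop; the
// "transparent" variants are layered on top and skip zero pixels.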
s32 affine_reference_x[2];
s32 affine_reference_y[2];
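// Fixed point (8 fractional bits) reference points for the two affine
// backgrounds, BG2 and BG3. The affine renderers below start each line at
// affine_reference_x/y[layer - 2] and step by the BG2PA/BG2PC deltas read
// from the I/O registers; the reference values are presumably reloaded from
// the BG2X/BG2Y (and BG3X/BG3Y) registers elsewhere in this file.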
#define affine_render_bg_pixel_normal() \
current_pixel = palette_ram_converted[0] \
#define affine_render_bg_pixel_alpha() \
current_pixel = bg_combine \
#define affine_render_bg_pixel_color16() \
affine_render_bg_pixel_alpha() \
#define affine_render_bg_pixel_color32() \
affine_render_bg_pixel_alpha() \
#define affine_render_bg_pixel_base(alpha_op) \
affine_render_bg_pixel_##alpha_op() \
#define affine_render_bg_pixel_transparent(alpha_op) \
#define affine_render_bg_pixel_copy(alpha_op) \
#define affine_render_bg_base(alpha_op) \
dest_ptr[0] = current_pixel
#define affine_render_bg_transparent(alpha_op) \
#define affine_render_bg_copy(alpha_op) \
#define affine_render_bg_remainder_base(alpha_op) \
affine_render_bg_pixel_##alpha_op(); \
for(; i < end; i++) \
{ \
affine_render_bg_base(alpha_op); \
advance_dest_ptr_base(1); \
} \
#define affine_render_bg_remainder_transparent(alpha_op) \
#define affine_render_bg_remainder_copy(alpha_op) \
#define affine_render_next(combine_op) \
source_x += dx; \
source_y += dy; \
advance_dest_ptr_##combine_op(1) \
#define affine_render_scale_offset() \
tile_base += ((pixel_y % 8) * 8); \
map_base += (pixel_y / 8) << map_pitch \
#define affine_render_scale_pixel(combine_op, alpha_op) \
map_offset = (pixel_x / 8); \
if(map_offset != last_map_offset) \
{ \
tile_ptr = tile_base + (map_base[map_offset] * 64); \
last_map_offset = map_offset; \
} \
current_pixel = tile_ptr[(pixel_x % 8)]; \
tile_8bpp_draw_##combine_op(0, none, 0, alpha_op); \
affine_render_next(combine_op) \
#define affine_render_scale(combine_op, alpha_op) \
{ \
pixel_y = source_y >> 8; \
u32 i = 0; \
affine_render_bg_pixel_##combine_op(alpha_op); \
if((u32)pixel_y < (u32)width_height) \
{ \
affine_render_scale_offset(); \
for(; i < end; i++) \
{ \
pixel_x = source_x >> 8; \
\
if((u32)pixel_x < (u32)width_height) \
{ \
break; \
} \
\
affine_render_bg_##combine_op(alpha_op); \
affine_render_next(combine_op); \
} \
\
for(; i < end; i++) \
{ \
pixel_x = source_x >> 8; \
\
if((u32)pixel_x >= (u32)width_height) \
break; \
\
affine_render_scale_pixel(combine_op, alpha_op); \
} \
} \
affine_render_bg_remainder_##combine_op(alpha_op); \
} \
#define affine_render_scale_wrap(combine_op, alpha_op) \
{ \
u32 wrap_mask = width_height - 1; \
pixel_y = (source_y >> 8) & wrap_mask; \
if((u32)pixel_y < (u32)width_height) \
{ \
affine_render_scale_offset(); \
for(i = 0; i < end; i++) \
{ \
pixel_x = (source_x >> 8) & wrap_mask; \
affine_render_scale_pixel(combine_op, alpha_op); \
} \
} \
} \
#define affine_render_rotate_pixel(combine_op, alpha_op) \
map_offset = (pixel_x / 8) + ((pixel_y / 8) << map_pitch); \
if(map_offset != last_map_offset) \
{ \
tile_ptr = tile_base + (map_base[map_offset] * 64); \
last_map_offset = map_offset; \
} \
\
current_pixel = tile_ptr[(pixel_x % 8) + ((pixel_y % 8) * 8)]; \
tile_8bpp_draw_##combine_op(0, none, 0, alpha_op); \
affine_render_next(combine_op) \
#define affine_render_rotate(combine_op, alpha_op) \
{ \
affine_render_bg_pixel_##combine_op(alpha_op); \
for(i = 0; i < end; i++) \
{ \
pixel_x = source_x >> 8; \
pixel_y = source_y >> 8; \
\
if(((u32)pixel_x < (u32)width_height) && \
((u32)pixel_y < (u32)width_height)) \
{ \
break; \
} \
affine_render_bg_##combine_op(alpha_op); \
affine_render_next(combine_op); \
} \
\
for(; i < end; i++) \
{ \
pixel_x = source_x >> 8; \
pixel_y = source_y >> 8; \
\
if(((u32)pixel_x >= (u32)width_height) || \
((u32)pixel_y >= (u32)width_height)) \
{ \
affine_render_bg_remainder_##combine_op(alpha_op); \
break; \
} \
\
affine_render_rotate_pixel(combine_op, alpha_op); \
} \
} \
#define affine_render_rotate_wrap(combine_op, alpha_op) \
{ \
u32 wrap_mask = width_height - 1; \
for(i = 0; i < end; i++) \
{ \
pixel_x = (source_x >> 8) & wrap_mask; \
pixel_y = (source_y >> 8) & wrap_mask; \
\
affine_render_rotate_pixel(combine_op, alpha_op); \
} \
} \
// Build affine background renderers.
#define render_scanline_affine_builder(combine_op, alpha_op) \
void render_scanline_affine_##combine_op##_##alpha_op(u32 layer, \
u32 start, u32 end, void *scanline) \
{ \
render_scanline_extra_variables_##combine_op##_##alpha_op(affine); \
u32 bg_control = io_registers[REG_BG0CNT + layer]; \
u32 current_pixel; \
s32 source_x, source_y; \
u32 vcount = io_registers[REG_VCOUNT]; \
u32 pixel_x, pixel_y; \
u32 layer_offset = (layer - 2) * 8; \
s32 dx, dy; \
u32 map_size = (bg_control >> 14) & 0x03; \
u32 width_height = 1 << (7 + map_size); \
u32 map_pitch = map_size + 4; \
u8 *map_base = vram + (((bg_control >> 8) & 0x1F) * (1024 * 2)); \
u8 *tile_base = vram + (((bg_control >> 2) & 0x03) * (1024 * 16)); \
u8 *tile_ptr; \
u16 *palette = palette_ram_converted; \
u32 map_offset, last_map_offset = (u32)-1; \
u32 i; \
render_scanline_dest_##alpha_op *dest_ptr = \
((render_scanline_dest_##alpha_op *)scanline) + start; \
\
dx = (s16)io_registers[REG_BG2PA + layer_offset]; \
dy = (s16)io_registers[REG_BG2PC + layer_offset]; \
source_x = affine_reference_x[layer - 2] + (start * dx); \
source_y = affine_reference_y[layer - 2] + (start * dy); \
\
end -= start; \
\
switch(((bg_control >> 12) & 0x02) | (dy != 0)) \
{ \
case 0x00: \
affine_render_scale(combine_op, alpha_op); \
break; \
\
case 0x01: \
affine_render_rotate(combine_op, alpha_op); \
break; \
\
case 0x02: \
affine_render_scale_wrap(combine_op, alpha_op); \
break; \
\
case 0x03: \
affine_render_rotate_wrap(combine_op, alpha_op); \
break; \
} \
} \
render_scanline_affine_builder(base, normal);
render_scanline_affine_builder(transparent, normal);
render_scanline_affine_builder(base, color16);
render_scanline_affine_builder(transparent, color16);
render_scanline_affine_builder(base, color32);
render_scanline_affine_builder(transparent, color32);
render_scanline_affine_builder(base, alpha);
render_scanline_affine_builder(transparent, alpha);
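// The switch in the builder above packs two decisions into one value: bit 1
// comes from BGxCNT bit 13 (display area overflow: wrap around the map
// instead of showing the backdrop) and bit 0 is set when dy != 0, i.e. the
// transform includes a rotation component. When dy == 0 the scale-only path
// can skip the per-pixel Y recalculation.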
#define bitmap_render_pixel_mode3(alpha_op) \
convert_palette(current_pixel); \
*dest_ptr = current_pixel \
#define bitmap_render_pixel_mode4(alpha_op) \
tile_expand_base_##alpha_op(0) \
#define bitmap_render_pixel_mode5(alpha_op) \
bitmap_render_pixel_mode3(alpha_op) \
#define bitmap_render_scale(type, alpha_op, width, height) \
pixel_y = (source_y >> 8); \
if((u32)pixel_y < (u32)height) \
{ \
pixel_x = (source_x >> 8); \
src_ptr += (pixel_y * width); \
if(dx == 0x100) \
{ \
if(pixel_x < 0) \
{ \
end += pixel_x; \
dest_ptr -= pixel_x; \
pixel_x = 0; \
} \
else \
\
if(pixel_x > 0) \
{ \
src_ptr += pixel_x; \
} \
\
if((pixel_x + end) >= width) \
end = (width - pixel_x); \
\
for(i = 0; (s32)i < (s32)end; i++) \
{ \
current_pixel = *src_ptr; \
bitmap_render_pixel_##type(alpha_op); \
src_ptr++; \
dest_ptr++; \
} \
} \
else \
{ \
if((u32)(source_y >> 8) < (u32)height) \
{ \
for(i = 0; i < end; i++) \
{ \
pixel_x = (source_x >> 8); \
\
if((u32)pixel_x < (u32)width) \
break; \
\
source_x += dx; \
dest_ptr++; \
} \
\
for(; i < end; i++) \
{ \
pixel_x = (source_x >> 8); \
\
if((u32)pixel_x >= (u32)width) \
break; \
\
current_pixel = src_ptr[pixel_x]; \
bitmap_render_pixel_##type(alpha_op); \
\
source_x += dx; \
dest_ptr++; \
} \
} \
} \
} \
#define bitmap_render_rotate(type, alpha_op, width, height) \
for(i = 0; i < end; i++) \
{ \
pixel_x = source_x >> 8; \
pixel_y = source_y >> 8; \
\
if(((u32)pixel_x < (u32)width) && ((u32)pixel_y < (u32)height)) \
break; \
\
source_x += dx; \
source_y += dy; \
dest_ptr++; \
} \
\
for(; i < end; i++) \
{ \
pixel_x = (source_x >> 8); \
pixel_y = (source_y >> 8); \
\
if(((u32)pixel_x >= (u32)width) || ((u32)pixel_y >= (u32)height)) \
break; \
\
current_pixel = src_ptr[pixel_x + (pixel_y * width)]; \
bitmap_render_pixel_##type(alpha_op); \
\
source_x += dx; \
source_y += dy; \
dest_ptr++; \
} \
#define render_scanline_vram_setup_mode3() \
u16 *src_ptr = (u16 *)vram \
#define render_scanline_vram_setup_mode4() \
u16 *palette = palette_ram_converted; \
u8 *src_ptr; \
if(io_registers[REG_DISPCNT] & 0x10) \
src_ptr = vram + 0xA000; \
else \
src_ptr = vram \
#define render_scanline_vram_setup_mode5() \
u16 *src_ptr; \
if(io_registers[REG_DISPCNT] & 0x10) \
src_ptr = (u16 *)(vram + 0xA000); \
else \
src_ptr = (u16 *)vram \
// Build bitmap scanline rendering functions.
#define render_scanline_bitmap_builder(type, alpha_op, width, height) \
void render_scanline_bitmap_##type##_##alpha_op(u32 start, u32 end, \
void *scanline) \
{ \
u32 bg_control = io_registers[REG_BG2CNT]; \
u32 current_pixel; \
s32 source_x, source_y; \
u32 vcount = io_registers[REG_VCOUNT]; \
s32 pixel_x, pixel_y; \
\
s32 dx = (s16)io_registers[REG_BG2PA]; \
s32 dy = (s16)io_registers[REG_BG2PC]; \
\
u32 i; \
\
render_scanline_dest_##alpha_op *dest_ptr = \
((render_scanline_dest_##alpha_op *)scanline) + start; \
render_scanline_vram_setup_##type(); \
\
end -= start; \
\
source_x = affine_reference_x[0] + (start * dx); \
source_y = affine_reference_y[0] + (start * dy); \
\
if(dy == 0) \
{ \
bitmap_render_scale(type, alpha_op, width, height); \
} \
else \
{ \
bitmap_render_rotate(type, alpha_op, width, height); \
} \
} \
render_scanline_bitmap_builder(mode3, normal, 240, 160);
render_scanline_bitmap_builder(mode4, normal, 240, 160);
render_scanline_bitmap_builder(mode5, normal, 160, 128);
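// Mode 3 is a single 240x160 15bpp frame, mode 4 is 240x160 8bpp paletted
// with two pages, and mode 5 is 160x128 15bpp with two pages; for the paged
// modes DISPCNT bit 4 selects the frame at vram + 0xA000 (see the vram setup
// macros above).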
// Fill in the renderers for a layer based on the mode type.
#define tile_layer_render_functions(type) \
{ \
render_scanline_##type##_base_normal, \
render_scanline_##type##_transparent_normal, \
render_scanline_##type##_base_alpha, \
render_scanline_##type##_transparent_alpha, \
render_scanline_##type##_base_color16, \
render_scanline_##type##_transparent_color16, \
render_scanline_##type##_base_color32, \
render_scanline_##type##_transparent_color32 \
} \
// Use if a layer is unsupported for that mode.
#define tile_layer_render_null() \
{ \
NULL, NULL, NULL, NULL \
} \
#define bitmap_layer_render_functions(type) \
{ \
render_scanline_bitmap_##type##_normal \
} \
// Structs containing functions to render the layers for each mode, for
// each render type.
tile_layer_render_struct tile_mode_renderers[3][4] =
{
{
tile_layer_render_functions(text), tile_layer_render_functions(text),
tile_layer_render_functions(text), tile_layer_render_functions(text)
},
{
tile_layer_render_functions(text), tile_layer_render_functions(text),
tile_layer_render_functions(affine), tile_layer_render_functions(text)
},
{
tile_layer_render_functions(text), tile_layer_render_functions(text),
tile_layer_render_functions(affine), tile_layer_render_functions(affine)
}
};
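// Indexed as tile_mode_renderers[video mode][BG layer]. Mode 1 uses text
// renderers for BG0/BG1 and the affine renderer for BG2; mode 2 uses affine
// renderers for BG2/BG3. Slots for layers that do not exist in a given mode
// (BG3 in mode 1, BG0/BG1 in mode 2) keep a text renderer as a placeholder
// and are expected to be masked off before selection (see active_layers
// further down).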
bitmap_layer_render_struct bitmap_mode_renderers[3] =
{
bitmap_layer_render_functions(mode3),
bitmap_layer_render_functions(mode4),
bitmap_layer_render_functions(mode5)
};
#define render_scanline_layer_functions_tile() \
tile_layer_render_struct *layer_renderers = \
tile_mode_renderers[dispcnt & 0x07] \
#define render_scanline_layer_functions_bitmap() \
bitmap_layer_render_struct *layer_renderers = \
bitmap_mode_renderers + ((dispcnt & 0x07) - 3) \
// Adjust a flipped obj's starting position
#define obj_tile_offset_noflip(color_depth) \
#define obj_tile_offset_flip(color_depth) \
+ (tile_size_##color_depth * ((obj_width - 8) / 8)) \
// Adjust the obj's starting point if it goes too far off the left edge of
// the screen.
#define obj_tile_right_offset_noflip(color_depth) \
tile_ptr += (partial_tile_offset / 8) * tile_size_##color_depth \
#define obj_tile_right_offset_flip(color_depth) \
tile_ptr -= (partial_tile_offset / 8) * tile_size_##color_depth \
// Get the current row offset into an obj in 1D map space
#define obj_tile_offset_1D(color_depth, flip_op) \
tile_ptr = tile_base + ((obj_attribute_2 & 0x3FF) * 32) \
+ ((vertical_offset / 8) * (obj_width / 8) * tile_size_##color_depth) \
+ ((vertical_offset % 8) * tile_width_##color_depth) \
obj_tile_offset_##flip_op(color_depth) \
// Get the current row offset into an obj in 2D map space
#define obj_tile_offset_2D(color_depth, flip_op) \
tile_ptr = tile_base + ((obj_attribute_2 & 0x3FF) * 32) \
+ ((vertical_offset / 8) * 1024) \
+ ((vertical_offset % 8) * tile_width_##color_depth) \
obj_tile_offset_##flip_op(color_depth) \
// Get the palette for 4bpp obj.
#define obj_get_palette_4bpp() \
current_palette = (obj_attribute_2 >> 8) & 0xF0 \
#define obj_get_palette_8bpp() \
// Render the current row of an obj.
#define obj_render(combine_op, color_depth, alpha_op, map_space, flip_op) \
{ \
obj_get_palette_##color_depth(); \
obj_tile_offset_##map_space(color_depth, flip_op); \
\
if(obj_x < (s32)start) \
{ \
dest_ptr = scanline + start; \
pixel_run = obj_width - (start - obj_x); \
if((s32)pixel_run > 0) \
{ \
if((obj_x + obj_width) >= end) \
{ \
pixel_run = end - start; \
partial_tile_offset = start - obj_x; \
obj_tile_right_offset_##flip_op(color_depth); \
partial_tile_offset %= 8; \
\
if(partial_tile_offset) \
{ \
partial_tile_run = 8 - partial_tile_offset; \
if((s32)pixel_run < (s32)partial_tile_run) \
{ \
if((s32)pixel_run > 0) \
{ \
partial_tile_run = pixel_run; \
partial_tile_mid_obj(combine_op, color_depth, alpha_op, \
flip_op); \
} \
continue; \
} \
else \
{ \
pixel_run -= partial_tile_run; \
partial_tile_right_obj(combine_op, color_depth, alpha_op, \
flip_op); \
} \
} \
tile_run = pixel_run / 8; \
multiple_tile_obj(combine_op, color_depth, alpha_op, flip_op); \
partial_tile_run = pixel_run % 8; \
if(partial_tile_run) \
{ \
partial_tile_left_obj(combine_op, color_depth, alpha_op, \
flip_op); \
} \
} \
else \
{ \
partial_tile_offset = start - obj_x; \
obj_tile_right_offset_##flip_op(color_depth); \
partial_tile_offset %= 8; \
if(partial_tile_offset) \
{ \
partial_tile_run = 8 - partial_tile_offset; \
partial_tile_right_obj(combine_op, color_depth, alpha_op, \
flip_op); \
} \
tile_run = pixel_run / 8; \
multiple_tile_obj(combine_op, color_depth, alpha_op, flip_op); \
} \
} \
} \
else \
\
if((obj_x + obj_width) >= end) \
{ \
pixel_run = end - obj_x; \
if((s32)pixel_run > 0) \
{ \
dest_ptr = scanline + obj_x; \
tile_run = pixel_run / 8; \
multiple_tile_obj(combine_op, color_depth, alpha_op, flip_op); \
partial_tile_run = pixel_run % 8; \
if(partial_tile_run) \
{ \
partial_tile_left_obj(combine_op, color_depth, alpha_op, flip_op); \
} \
} \
} \
else \
{ \
dest_ptr = scanline + obj_x; \
tile_run = obj_width / 8; \
multiple_tile_obj(combine_op, color_depth, alpha_op, flip_op); \
} \
} \
#define obj_scale_offset_1D(color_depth) \
tile_ptr = tile_base + ((obj_attribute_2 & 0x3FF) * 32) \
+ ((vertical_offset / 8) * (max_x / 8) * tile_size_##color_depth) \
+ ((vertical_offset % 8) * tile_width_##color_depth) \
// Get the current row offset into an obj in 2D map space
#define obj_scale_offset_2D(color_depth) \
tile_ptr = tile_base + ((obj_attribute_2 & 0x3FF) * 32) \
+ ((vertical_offset / 8) * 1024) \
+ ((vertical_offset % 8) * tile_width_##color_depth) \
#define obj_render_scale_pixel_4bpp(combine_op, alpha_op) \
if(tile_x & 0x01) \
{ \
current_pixel = tile_ptr[tile_map_offset + ((tile_x >> 1) & 0x03)] >> 4; \
} \
else \
{ \
current_pixel = \
tile_ptr[tile_map_offset + ((tile_x >> 1) & 0x03)] & 0x0F; \
} \
\
tile_4bpp_draw_##combine_op(0, none, 0, alpha_op) \
#define obj_render_scale_pixel_8bpp(combine_op, alpha_op) \
current_pixel = tile_ptr[tile_map_offset + (tile_x & 0x07)]; \
tile_8bpp_draw_##combine_op(0, none, 0, alpha_op); \
#define obj_render_scale(combine_op, color_depth, alpha_op, map_space) \
{ \
u32 vertical_offset; \
source_y += (y_delta * dmy); \
vertical_offset = (source_y >> 8); \
if((u32)vertical_offset < (u32)max_y) \
{ \
obj_scale_offset_##map_space(color_depth); \
source_x += (y_delta * dmx) - (middle_x * dx); \
\
for(i = 0; i < obj_width; i++) \
{ \
tile_x = (source_x >> 8); \
\
if((u32)tile_x < (u32)max_x) \
break; \
\
source_x += dx; \
advance_dest_ptr_##combine_op(1); \
} \
\
for(; i < obj_width; i++) \
{ \
tile_x = (source_x >> 8); \
\
if((u32)tile_x >= (u32)max_x) \
break; \
\
tile_map_offset = (tile_x >> 3) * tile_size_##color_depth; \
obj_render_scale_pixel_##color_depth(combine_op, alpha_op); \
\
source_x += dx; \
advance_dest_ptr_##combine_op(1); \
} \
} \
} \
#define obj_rotate_offset_1D(color_depth) \
obj_tile_pitch = (max_x / 8) * tile_size_##color_depth \
#define obj_rotate_offset_2D(color_depth) \
obj_tile_pitch = 1024 \
#define obj_render_rotate_pixel_4bpp(combine_op, alpha_op) \
if(tile_x & 0x01) \
{ \
current_pixel = tile_ptr[tile_map_offset + \
((tile_x >> 1) & 0x03) + ((tile_y & 0x07) * obj_pitch)] >> 4; \
} \
else \
{ \
current_pixel = tile_ptr[tile_map_offset + \
((tile_x >> 1) & 0x03) + ((tile_y & 0x07) * obj_pitch)] & 0x0F; \
} \
\
tile_4bpp_draw_##combine_op(0, none, 0, alpha_op) \
#define obj_render_rotate_pixel_8bpp(combine_op, alpha_op) \
current_pixel = tile_ptr[tile_map_offset + \
(tile_x & 0x07) + ((tile_y & 0x07) * obj_pitch)]; \
\
tile_8bpp_draw_##combine_op(0, none, 0, alpha_op) \
#define obj_render_rotate(combine_op, color_depth, alpha_op, map_space) \
{ \
tile_ptr = tile_base + ((obj_attribute_2 & 0x3FF) * 32); \
obj_rotate_offset_##map_space(color_depth); \
\
source_x += (y_delta * dmx) - (middle_x * dx); \
source_y += (y_delta * dmy) - (middle_x * dy); \
\
for(i = 0; i < obj_width; i++) \
{ \
tile_x = (source_x >> 8); \
tile_y = (source_y >> 8); \
\
if(((u32)tile_x < (u32)max_x) && ((u32)tile_y < (u32)max_y)) \
break; \
\
source_x += dx; \
source_y += dy; \
advance_dest_ptr_##combine_op(1); \
} \
\
for(; i < obj_width; i++) \
{ \
tile_x = (source_x >> 8); \
tile_y = (source_y >> 8); \
\
if(((u32)tile_x >= (u32)max_x) || ((u32)tile_y >= (u32)max_y)) \
break; \
\
tile_map_offset = ((tile_x >> 3) * tile_size_##color_depth) + \
((tile_y >> 3) * obj_tile_pitch); \
obj_render_rotate_pixel_##color_depth(combine_op, alpha_op); \
\
source_x += dx; \
source_y += dy; \
advance_dest_ptr_##combine_op(1); \
} \
} \
// Render the current row of an affine transformed OBJ.
#define obj_render_affine(combine_op, color_depth, alpha_op, map_space) \
{ \
s16 *params = oam_ram + (((obj_attribute_1 >> 9) & 0x1F) * 16); \
s32 dx = params[3]; \
s32 dmx = params[7]; \
s32 dy = params[11]; \
s32 dmy = params[15]; \
s32 source_x, source_y; \
s32 tile_x, tile_y; \
u32 tile_offset; \
u32 tile_map_offset; \
s32 middle_x; \
s32 middle_y; \
s32 max_x = obj_width; \
s32 max_y = obj_height; \
s32 y_delta; \
u32 obj_pitch = tile_width_##color_depth; \
u32 obj_tile_pitch; \
\
middle_x = (obj_width / 2); \
middle_y = (obj_height / 2); \
\
source_x = (middle_x << 8); \
source_y = (middle_y << 8); \
\
\
if(obj_attribute_0 & 0x200) \
{ \
obj_width *= 2; \
obj_height *= 2; \
middle_x *= 2; \
middle_y *= 2; \
} \
\
if((s32)obj_x < (s32)start) \
{ \
u32 x_delta = start - obj_x; \
middle_x -= x_delta; \
obj_width -= x_delta; \
obj_x = start; \
\
if((s32)obj_width <= 0) \
continue; \
} \
\
if((s32)(obj_x + obj_width) >= (s32)end) \
{ \
obj_width = end - obj_x; \
\
if((s32)obj_width <= 0) \
continue; \
} \
dest_ptr = scanline + obj_x; \
\
y_delta = vcount - (obj_y + middle_y); \
\
obj_get_palette_##color_depth(); \
\
if(dy == 0) \
{ \
obj_render_scale(combine_op, color_depth, alpha_op, map_space); \
} \
else \
{ \
obj_render_rotate(combine_op, color_depth, alpha_op, map_space); \
} \
} \
u32 obj_width_table[] = { 8, 16, 32, 64, 16, 32, 32, 64, 8, 8, 16, 32 };
u32 obj_height_table[] = { 8, 16, 32, 64, 8, 8, 16, 32, 16, 32, 32, 64 };
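// Indexed by (shape << 2) | size taken from OBJ attributes 0/1: shapes are
// square, horizontal and vertical; e.g. shape 1 (horizontal) with size 3
// gives a 64x32 sprite.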
u8 obj_priority_list[5][160][128];
u32 obj_priority_count[5][160];
u32 obj_alpha_count[160];
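// Per-scanline OBJ bookkeeping filled in by order_obj() below:
// obj_priority_list[priority][line] holds up to 128 OBJ indices visible on
// that line at that priority (priority 4 collects OBJ-window sprites),
// obj_priority_count holds the list lengths and obj_alpha_count counts
// semi-transparent OBJs per line.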
// Build obj rendering functions
#define render_scanline_obj_extra_variables_normal(map_space) \
#define render_scanline_obj_extra_variables_color() \
u32 dest; \
u32 pixel_combine = color_combine_mask(4) | (1 << 8) \
#define render_scanline_obj_extra_variables_alpha_obj(map_space) \
render_scanline_obj_extra_variables_color(); \
if((pixel_combine & 0x00000200) == 0) \
{ \
render_scanline_obj_color32_##map_space(priority, start, end, scanline); \
return; \
} \
#define render_scanline_obj_extra_variables_color16(map_space) \
render_scanline_obj_extra_variables_color() \
#define render_scanline_obj_extra_variables_color32(map_space) \
render_scanline_obj_extra_variables_color() \
#define render_scanline_obj_extra_variables_partial_alpha(map_space) \
render_scanline_obj_extra_variables_color(); \
u32 base_pixel_combine = pixel_combine \
#define render_scanline_obj_extra_variables_copy(type) \
u32 bldcnt = io_registers[REG_BLDCNT]; \
u32 dispcnt = io_registers[REG_DISPCNT]; \
u32 obj_enable = io_registers[REG_WINOUT] >> 8; \
render_scanline_layer_functions_##type(); \
u32 copy_start, copy_end; \
u16 copy_buffer[240]; \
u16 *copy_ptr \
#define render_scanline_obj_extra_variables_copy_tile(map_space) \
render_scanline_obj_extra_variables_copy(tile) \
#define render_scanline_obj_extra_variables_copy_bitmap(map_space) \
render_scanline_obj_extra_variables_copy(bitmap) \
#define render_scanline_obj_main(combine_op, alpha_op, map_space) \
if(obj_attribute_0 & 0x100) \
{ \
if((obj_attribute_0 >> 13) & 0x01) \
{ \
obj_render_affine(combine_op, 8bpp, alpha_op, map_space); \
} \
else \
{ \
obj_render_affine(combine_op, 4bpp, alpha_op, map_space); \
} \
} \
else \
{ \
vertical_offset = vcount - obj_y; \
\
if((obj_attribute_1 >> 13) & 0x01) \
vertical_offset = obj_height - vertical_offset - 1; \
\
switch(((obj_attribute_0 >> 12) & 0x02) | \
((obj_attribute_1 >> 12) & 0x01)) \
{ \
case 0x0: \
obj_render(combine_op, 4bpp, alpha_op, map_space, noflip); \
break; \
\
case 0x1: \
obj_render(combine_op, 4bpp, alpha_op, map_space, flip); \
break; \
\
case 0x2: \
obj_render(combine_op, 8bpp, alpha_op, map_space, noflip); \
break; \
\
case 0x3: \
obj_render(combine_op, 8bpp, alpha_op, map_space, flip); \
break; \
} \
} \
#define render_scanline_obj_no_partial_alpha(combine_op, alpha_op, map_space) \
render_scanline_obj_main(combine_op, alpha_op, map_space) \
#define render_scanline_obj_partial_alpha(combine_op, alpha_op, map_space) \
if((obj_attribute_0 >> 10) & 0x03) \
{ \
pixel_combine = 0x00000300; \
render_scanline_obj_main(combine_op, alpha_obj, map_space); \
} \
else \
{ \
pixel_combine = base_pixel_combine; \
render_scanline_obj_main(combine_op, color32, map_space); \
} \
#define render_scanline_obj_prologue_transparent(alpha_op) \
#define render_scanline_obj_prologue_copy_body(type) \
copy_start = obj_x; \
if(obj_attribute_0 & 0x200) \
copy_end = obj_x + (obj_width * 2); \
else \
copy_end = obj_x + obj_width; \
\
if(copy_start < start) \
copy_start = start; \
if(copy_end > end) \
copy_end = end; \
\
if((copy_start < end) && (copy_end > start)) \
{ \
render_scanline_conditional_##type(copy_start, copy_end, copy_buffer, \
obj_enable, dispcnt, bldcnt, layer_renderers); \
copy_ptr = copy_buffer + copy_start; \
} \
else \
{ \
continue; \
} \
#define render_scanline_obj_prologue_copy_tile() \
render_scanline_obj_prologue_copy_body(tile) \
#define render_scanline_obj_prologue_copy_bitmap() \
render_scanline_obj_prologue_copy_body(bitmap) \
#define render_scanline_obj_prologue_copy(alpha_op) \
render_scanline_obj_prologue_##alpha_op() \
#define render_scanline_obj_builder(combine_op, alpha_op, map_space, \
partial_alpha_op) \
void render_scanline_obj_##alpha_op##_##map_space(u32 priority, \
u32 start, u32 end, render_scanline_dest_##alpha_op *scanline) \
{ \
render_scanline_obj_extra_variables_##alpha_op(map_space); \
s32 obj_num, i; \
s32 obj_x, obj_y; \
s32 obj_size; \
s32 obj_width, obj_height; \
u32 obj_attribute_0, obj_attribute_1, obj_attribute_2; \
s32 vcount = io_registers[REG_VCOUNT]; \
u32 tile_run; \
u32 current_pixels; \
u32 current_pixel; \
u32 current_palette; \
u32 vertical_offset; \
u32 partial_tile_run, partial_tile_offset; \
u32 pixel_run; \
u16 *oam_ptr; \
u16 *palette = palette_ram_converted + 256; \
render_scanline_dest_##alpha_op *dest_ptr; \
u8 *tile_base = vram + 0x10000; \
u8 *tile_ptr; \
u32 obj_count = obj_priority_count[priority][vcount]; \
u8 *obj_list = obj_priority_list[priority][vcount]; \
\
for(obj_num = 0; obj_num < obj_count; obj_num++) \
{ \
oam_ptr = oam_ram + (obj_list[obj_num] * 4); \
obj_attribute_0 = oam_ptr[0]; \
obj_attribute_1 = oam_ptr[1]; \
obj_attribute_2 = oam_ptr[2]; \
obj_size = ((obj_attribute_0 >> 12) & 0x0C) | (obj_attribute_1 >> 14); \
\
obj_x = (s32)(obj_attribute_1 << 23) >> 23; \
obj_width = obj_width_table[obj_size]; \
\
render_scanline_obj_prologue_##combine_op(alpha_op); \
\
obj_y = obj_attribute_0 & 0xFF; \
\
if(obj_y > 160) \
obj_y -= 256; \
\
obj_height = obj_height_table[obj_size]; \
render_scanline_obj_##partial_alpha_op(combine_op, alpha_op, map_space); \
} \
} \
render_scanline_obj_builder(transparent, normal, 1D, no_partial_alpha);
render_scanline_obj_builder(transparent, normal, 2D, no_partial_alpha);
render_scanline_obj_builder(transparent, color16, 1D, no_partial_alpha);
render_scanline_obj_builder(transparent, color16, 2D, no_partial_alpha);
render_scanline_obj_builder(transparent, color32, 1D, no_partial_alpha);
render_scanline_obj_builder(transparent, color32, 2D, no_partial_alpha);
render_scanline_obj_builder(transparent, alpha_obj, 1D, no_partial_alpha);
render_scanline_obj_builder(transparent, alpha_obj, 2D, no_partial_alpha);
render_scanline_obj_builder(transparent, partial_alpha, 1D, partial_alpha);
render_scanline_obj_builder(transparent, partial_alpha, 2D, partial_alpha);
render_scanline_obj_builder(copy, copy_tile, 1D, no_partial_alpha);
render_scanline_obj_builder(copy, copy_tile, 2D, no_partial_alpha);
render_scanline_obj_builder(copy, copy_bitmap, 1D, no_partial_alpha);
render_scanline_obj_builder(copy, copy_bitmap, 2D, no_partial_alpha);
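// One OBJ renderer is generated per destination format and mapping mode
// (1D or 2D). The partial_alpha variants switch per sprite between alpha and
// color32 output based on the OBJ mode bits, and the copy variants first
// render the underlying layers into copy_buffer across the sprite's span,
// which is how the OBJ window is realized further down.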
void order_obj(u32 video_mode)
{
s32 obj_num, priority, row;
s32 obj_x, obj_y;
s32 obj_size, obj_mode;
s32 obj_width, obj_height;
u32 obj_priority;
u32 obj_attribute_0, obj_attribute_1, obj_attribute_2;
u32 current_count;
u16 *oam_ptr = oam_ram + 508;
for(priority = 0; priority < 5; priority++)
{
for(row = 0; row < 160; row++)
{
obj_priority_count[priority][row] = 0;
}
}
for(row = 0; row < 160; row++)
{
obj_alpha_count[row] = 0;
}
for(obj_num = 127; obj_num >= 0; obj_num--, oam_ptr -= 4)
{
obj_attribute_0 = oam_ptr[0];
obj_attribute_2 = oam_ptr[2];
obj_size = obj_attribute_0 & 0xC000;
obj_priority = (obj_attribute_2 >> 10) & 0x03;
obj_mode = (obj_attribute_0 >> 10) & 0x03;
if(((obj_attribute_0 & 0x0300) != 0x0200) && (obj_size != 0xC000) &&
(obj_mode != 3) && ((video_mode < 3) ||
((obj_attribute_2 & 0x3FF) >= 512)))
{
obj_y = obj_attribute_0 & 0xFF;
if(obj_y > 160)
obj_y -= 256;
obj_attribute_1 = oam_ptr[1];
obj_size = ((obj_size >> 12) & 0x0C) | (obj_attribute_1 >> 14);
obj_height = obj_height_table[obj_size];
obj_width = obj_width_table[obj_size];
if(obj_attribute_0 & 0x200)
{
obj_height *= 2;
obj_width *= 2;
}
if(((obj_y + obj_height) > 0) && (obj_y < 160))
{
obj_x = (s32)(obj_attribute_1 << 23) >> 23;
if(((obj_x + obj_width) > 0) && (obj_x < 240))
{
if(obj_y < 0)
{
obj_height += obj_y;
obj_y = 0;
}
if((obj_y + obj_height) >= 160)
{
obj_height = 160 - obj_y;
}
if(obj_mode == 1)
{
for(row = obj_y; row < obj_y + obj_height; row++)
{
current_count = obj_priority_count[obj_priority][row];
obj_priority_list[obj_priority][row][current_count] = obj_num;
obj_priority_count[obj_priority][row] = current_count + 1;
obj_alpha_count[row]++;
}
}
else
{
if(obj_mode == 2)
{
obj_priority = 4;
}
for(row = obj_y; row < obj_y + obj_height; row++)
{
current_count = obj_priority_count[obj_priority][row];
obj_priority_list[obj_priority][row][current_count] = obj_num;
obj_priority_count[obj_priority][row] = current_count + 1;
}
}
}
}
}
}
}
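// Builds layer_order[] for the current scanline: BG layers and OBJ groups
// are appended from priority 3 (drawn first, furthest back) down to
// priority 0, so the list ends up in back-to-front drawing order.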
u32 layer_order[16];
u32 layer_count;
u32 order_layers(u32 layer_flags)
{
s32 priority, layer_number;
layer_count = 0;
for(priority = 3; priority >= 0; priority--)
{
for(layer_number = 3; layer_number >= 0; layer_number--)
{
if(((layer_flags >> layer_number) & 1) &&
((io_registers[REG_BG0CNT + layer_number] & 0x03) == priority))
{
layer_order[layer_count] = layer_number;
layer_count++;
}
}
if((obj_priority_count[priority][io_registers[REG_VCOUNT]] > 0)
&& (layer_flags & 0x10))
{
layer_order[layer_count] = priority | 0x04;
layer_count++;
}
}
return layer_count;
}
#define fill_line(_start, _end) \
u32 i; \
\
for(i = _start; i < _end; i++) \
{ \
dest_ptr[i] = color; \
} \
#define fill_line_color_normal() \
color = palette_ram_converted[color] \
#define fill_line_color_alpha() \
#define fill_line_color_color16() \
#define fill_line_color_color32() \
#define fill_line_builder(type) \
void fill_line_##type(u16 color, render_scanline_dest_##type *dest_ptr, \
u32 start, u32 end) \
{ \
fill_line_color_##type(); \
fill_line(start, end); \
} \
fill_line_builder(normal);
fill_line_builder(alpha);
fill_line_builder(color16);
fill_line_builder(color32);
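// fill_line_normal converts the palette index to a final 16-bit color before
// filling; the other variants write the raw index so that the expand_* color
// effect passes below can still be applied to the filled span.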
// Alpha blend two pixels (pixel_top and pixel_bottom).
#define blend_pixel() \
pixel_bottom = palette_ram_converted[(pixel_pair >> 16) & 0x1FF]; \
pixel_bottom = (pixel_bottom | (pixel_bottom << 16)) & 0x07E0F81F; \
pixel_top = ((pixel_top * blend_a) + (pixel_bottom * blend_b)) >> 4; \
// Alpha blend two pixels, allowing for saturation (individual channels > 31).
// The operation is optimized for the case where saturation does not occur.
#define blend_saturate_pixel() \
pixel_bottom = palette_ram_converted[(pixel_pair >> 16) & 0x1FF]; \
pixel_bottom = (pixel_bottom | (pixel_bottom << 16)) & 0x07E0F81F; \
pixel_top = ((pixel_top * blend_a) + (pixel_bottom * blend_b)) >> 4; \
if(pixel_top & 0x08010020) \
{ \
if(pixel_top & 0x08000000) \
pixel_top |= 0x07E00000; \
\
if(pixel_top & 0x00010000) \
pixel_top |= 0x0000F800; \
\
if(pixel_top & 0x00000020) \
pixel_top |= 0x0000001F; \
} \
#define brighten_pixel() \
pixel_top = upper + ((pixel_top * blend) >> 4); \
#define darken_pixel() \
pixel_top = (pixel_top * blend) >> 4; \
#define effect_condition_alpha \
((pixel_pair & 0x04000200) == 0x04000200) \
#define effect_condition_fade(pixel_source) \
((pixel_source & 0x00000200) == 0x00000200) \
#define expand_pixel_no_dest(expand_type, pixel_source) \
pixel_top = (pixel_top | (pixel_top << 16)) & 0x07E0F81F; \
expand_type##_pixel(); \
pixel_top &= 0x07E0F81F; \
pixel_top = (pixel_top >> 16) | pixel_top \
#define expand_pixel(expand_type, pixel_source) \
pixel_top = palette_ram_converted[pixel_source & 0x1FF]; \
expand_pixel_no_dest(expand_type, pixel_source); \
*screen_dest_ptr = pixel_top \
#define expand_loop(expand_type, effect_condition, pixel_source) \
screen_src_ptr += start; \
screen_dest_ptr += start; \
\
end -= start; \
\
for(i = 0; i < end; i++) \
{ \
pixel_source = *screen_src_ptr; \
if(effect_condition) \
{ \
expand_pixel(expand_type, pixel_source); \
} \
else \
{ \
*screen_dest_ptr = \
palette_ram_converted[pixel_source & 0x1FF]; \
} \
\
screen_src_ptr++; \
screen_dest_ptr++; \
} \
#define expand_loop_partial_alpha(alpha_expand, expand_type) \
screen_src_ptr += start; \
screen_dest_ptr += start; \
\
end -= start; \
\
for(i = 0; i < end; i++) \
{ \
pixel_pair = *screen_src_ptr; \
if(effect_condition_fade(pixel_pair)) \
{ \
if(effect_condition_alpha) \
{ \
expand_pixel(alpha_expand, pixel_pair); \
} \
else \
{ \
expand_pixel(expand_type, pixel_pair); \
} \
} \
else \
{ \
*screen_dest_ptr = \
palette_ram_converted[pixel_pair & 0x1FF]; \
} \
\
screen_src_ptr++; \
screen_dest_ptr++; \
} \
#define expand_partial_alpha(expand_type) \
if((blend_a + blend_b) > 16) \
{ \
expand_loop_partial_alpha(blend_saturate, expand_type); \
} \
else \
{ \
expand_loop_partial_alpha(blend, expand_type); \
} \
// Blend top two pixels of scanline with each other.
void expand_blend(u32 *screen_src_ptr, u16 *screen_dest_ptr,
u32 start, u32 end)
{
u32 pixel_pair;
u32 pixel_top, pixel_bottom;
u32 bldalpha = io_registers[REG_BLDALPHA];
u32 blend_a = bldalpha & 0x1F;
u32 blend_b = (bldalpha >> 8) & 0x1F;
u32 i;
if(blend_a > 16)
blend_a = 16;
if(blend_b > 16)
blend_b = 16;
// The individual color channels can saturate above 31; this is handled in an
// alternate pass because it incurs a large additional speed hit.
if((blend_a + blend_b) > 16)
{
expand_loop(blend_saturate, effect_condition_alpha, pixel_pair);
}
else
{
expand_loop(blend, effect_condition_alpha, pixel_pair);
}
}
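// Notes on the packed blend math above: a 16-bit 565 color c is spread with
// (c | (c << 16)) & 0x07E0F81F so that red and blue stay in the low half and
// green moves to the high half, letting one multiply scale all three
// channels at once; blend coefficients are in 1/16 units, hence the >> 4.
// In the 32-bit intermediate scanline the low halfword is the topmost pixel
// (palette index in bits 0-8) and the high halfword is the pixel underneath;
// effect_condition_alpha (mask 0x04000200) blends only when the top pixel's
// 0x200 flag and the bottom pixel's 0x04000000 flag are both set, which
// appear to mark blend targets A and B respectively.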
// Fade the scanline towards black (darken, using BLDY).
void expand_darken(u16 *screen_src_ptr, u16 *screen_dest_ptr,
u32 start, u32 end)
{
u32 pixel_top;
s32 blend = 16 - (io_registers[REG_BLDY] & 0x1F);
u32 i;
if(blend < 0)
blend = 0;
expand_loop(darken, effect_condition_fade(pixel_top), pixel_top);
}
// Fade the scanline towards white (brighten, using BLDY).
void expand_brighten(u16 *screen_src_ptr, u16 *screen_dest_ptr,
u32 start, u32 end)
{
u32 pixel_top;
u32 blend = io_registers[REG_BLDY] & 0x1F;
u32 upper;
u32 i;
if(blend > 16)
blend = 16;
upper = ((0x07E0F81F * blend) >> 4) & 0x07E0F81F;
blend = 16 - blend;
expand_loop(brighten, effect_condition_fade(pixel_top), pixel_top);
}
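// Both fades use the standard GBA formulas: darken computes
// pixel * (16 - EVY) / 16, brighten computes
// white * EVY / 16 + pixel * (16 - EVY) / 16, with the white term
// precomputed into 'upper' using the same packed 0x07E0F81F layout.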
// Expand the scanline such that if both the top and bottom pixels pass it is
// alpha blended, if only the top passes it is faded as specified, and if
// neither passes it is rendered normally.
void expand_darken_partial_alpha(u32 *screen_src_ptr, u16 *screen_dest_ptr,
u32 start, u32 end)
{
s32 blend = 16 - (io_registers[REG_BLDY] & 0x1F);
u32 pixel_pair;
u32 pixel_top, pixel_bottom;
u32 bldalpha = io_registers[REG_BLDALPHA];
u32 blend_a = bldalpha & 0x1F;
u32 blend_b = (bldalpha >> 8) & 0x1F;
u32 i;
if(blend < 0)
blend = 0;
if(blend_a > 16)
blend_a = 16;
if(blend_b > 16)
blend_b = 16;
expand_partial_alpha(darken);
}
void expand_brighten_partial_alpha(u32 *screen_src_ptr, u16 *screen_dest_ptr,
u32 start, u32 end)
{
s32 blend = io_registers[REG_BLDY] & 0x1F;
u32 pixel_pair;
u32 pixel_top, pixel_bottom;
u32 bldalpha = io_registers[REG_BLDALPHA];
u32 blend_a = bldalpha & 0x1F;
u32 blend_b = (bldalpha >> 8) & 0x1F;
u32 upper;
u32 i;
if(blend > 16)
blend = 16;
upper = ((0x07E0F81F * blend) >> 4) & 0x07E0F81F;
blend = 16 - blend;
if(blend_a > 16)
blend_a = 16;
if(blend_b > 16)
blend_b = 16;
expand_partial_alpha(brighten);
}
// Render an OBJ layer from start to end, using the OBJ mapping mode
// (1D or 2D) taken from dispcnt.
#define render_obj_layer(type, dest, _start, _end) \
current_layer &= ~0x04; \
if(dispcnt & 0x40) \
render_scanline_obj_##type##_1D(current_layer, _start, _end, dest); \
else \
render_scanline_obj_##type##_2D(current_layer, _start, _end, dest) \
// Fill the whole target line with the backdrop color taken from palette
// entry 0.
#define fill_line_bg(type, dest, _start, _end) \
fill_line_##type(0, dest, _start, _end) \
// Render all layers as they appear in the layer order.
#define render_layers(tile_alpha, obj_alpha, dest) \
{ \
current_layer = layer_order[0]; \
if(current_layer & 0x04) \
{ \
/* If the first one is OBJ render the background then render it. */ \
fill_line_bg(tile_alpha, dest, 0, 240); \
render_obj_layer(obj_alpha, dest, 0, 240); \
} \
else \
{ \
/* Otherwise render a base layer. */ \
layer_renderers[current_layer].tile_alpha##_render_base(current_layer, \
0, 240, dest); \
} \
\
/* Render the rest of the layers. */ \
for(layer_order_pos = 1; layer_order_pos < layer_count; layer_order_pos++) \
{ \
current_layer = layer_order[layer_order_pos]; \
if(current_layer & 0x04) \
{ \
render_obj_layer(obj_alpha, dest, 0, 240); \
} \
else \
{ \
layer_renderers[current_layer]. \
tile_alpha##_render_transparent(current_layer, 0, 240, dest); \
} \
} \
} \
#define render_condition_alpha \
(((io_registers[REG_BLDALPHA] & 0x1F1F) != 0x001F) && \
((io_registers[REG_BLDCNT] & 0x3F) != 0) && \
((io_registers[REG_BLDCNT] & 0x3F00) != 0)) \
#define render_condition_fade \
(((io_registers[REG_BLDY] & 0x1F) != 0) && \
((io_registers[REG_BLDCNT] & 0x3F) != 0)) \
#define render_layers_color_effect(renderer, layer_condition, \
alpha_condition, fade_condition, _start, _end) \
{ \
if(layer_condition) \
{ \
if(obj_alpha_count[io_registers[REG_VCOUNT]] > 0) \
{ \
/* Render based on special effects mode. */ \
u32 screen_buffer[240]; \
switch((bldcnt >> 6) & 0x03) \
{ \
/* Alpha blend */ \
case 0x01: \
{ \
if(alpha_condition) \
{ \
renderer(alpha, alpha_obj, screen_buffer); \
expand_blend(screen_buffer, scanline, _start, _end); \
return; \
} \
break; \
} \
\
/* Fade to white */ \
case 0x02: \
{ \
if(fade_condition) \
{ \
renderer(color32, partial_alpha, screen_buffer); \
expand_brighten_partial_alpha(screen_buffer, scanline, \
_start, _end); \
return; \
} \
break; \
} \
\
/* Fade to black */ \
case 0x03: \
{ \
if(fade_condition) \
{ \
renderer(color32, partial_alpha, screen_buffer); \
expand_darken_partial_alpha(screen_buffer, scanline, \
_start, _end); \
return; \
} \
break; \
} \
} \
\
renderer(color32, partial_alpha, screen_buffer); \
expand_blend(screen_buffer, scanline, _start, _end); \
} \
else \
{ \
/* Render based on special effects mode. */ \
switch((bldcnt >> 6) & 0x03) \
{ \
/* Alpha blend */ \
case 0x01: \
{ \
if(alpha_condition) \
{ \
u32 screen_buffer[240]; \
renderer(alpha, alpha_obj, screen_buffer); \
expand_blend(screen_buffer, scanline, _start, _end); \
return; \
} \
break; \
} \
\
/* Fade to white */ \
case 0x02: \
{ \
if(fade_condition) \
{ \
renderer(color16, color16, scanline); \
expand_brighten(scanline, scanline, _start, _end); \
return; \
} \
break; \
} \
\
/* Fade to black */ \
case 0x03: \
{ \
if(fade_condition) \
{ \
renderer(color16, color16, scanline); \
expand_darken(scanline, scanline, _start, _end); \
return; \
} \
break; \
} \
} \
\
renderer(normal, normal, scanline); \
} \
} \
else \
{ \
u32 pixel_top = palette_ram_converted[0]; \
switch((bldcnt >> 6) & 0x03) \
{ \
/* Fade to white */ \
case 0x02: \
{ \
if(color_combine_mask_a(5)) \
{ \
u32 blend = io_registers[REG_BLDY] & 0x1F; \
u32 upper; \
\
if(blend > 16) \
blend = 16; \
\
upper = ((0x07E0F81F * blend) >> 4) & 0x07E0F81F; \
blend = 16 - blend; \
\
expand_pixel_no_dest(brighten, pixel_top); \
} \
break; \
} \
\
/* Fade to black */ \
case 0x03: \
{ \
if(color_combine_mask_a(5)) \
{ \
s32 blend = 16 - (io_registers[REG_BLDY] & 0x1F); \
\
if(blend < 0) \
blend = 0; \
\
expand_pixel_no_dest(darken, pixel_top); \
} \
break; \
} \
} \
fill_line_color16(pixel_top, scanline, _start, _end); \
} \
} \
// Renders an entire scanline from 0 to 240, based on current color mode.
void render_scanline_tile(u16 *scanline, u32 dispcnt)
{
u32 current_layer;
u32 layer_order_pos;
u32 bldcnt = io_registers[REG_BLDCNT];
render_scanline_layer_functions_tile();
render_layers_color_effect(render_layers, layer_count,
render_condition_alpha, render_condition_fade, 0, 240);
}
void render_scanline_bitmap(u16 *scanline, u32 dispcnt)
{
u32 bldcnt = io_registers[REG_BLDCNT];
render_scanline_layer_functions_bitmap();
u32 current_layer;
u32 layer_order_pos;
fill_line_bg(normal, scanline, 0, 240);
for(layer_order_pos = 0; layer_order_pos < layer_count; layer_order_pos++)
{
current_layer = layer_order[layer_order_pos];
if(current_layer & 0x04)
{
render_obj_layer(normal, scanline, 0, 240);
}
else
{
layer_renderers->normal_render(0, 240, scanline);
}
}
}
// Render layers from start to end based on whether they are allowed by the
// enable flags.
#define render_layers_conditional(tile_alpha, obj_alpha, dest) \
{ \
__label__ skip; \
current_layer = layer_order[layer_order_pos]; \
/* If OBJ aren't enabled skip to the first non-OBJ layer */ \
if(!(enable_flags & 0x10)) \
{ \
while((current_layer & 0x04) || !((1 << current_layer) & enable_flags)) \
{ \
layer_order_pos++; \
current_layer = layer_order[layer_order_pos]; \
\
/* Oops, ran out of layers, render the background. */ \
if(layer_order_pos == layer_count) \
{ \
fill_line_bg(tile_alpha, dest, start, end); \
goto skip; \
} \
} \
\
/* Render the first valid layer */ \
layer_renderers[current_layer].tile_alpha##_render_base(current_layer, \
start, end, dest); \
\
layer_order_pos++; \
\
/* Render the rest of the layers if active, skipping OBJ ones. */ \
for(; layer_order_pos < layer_count; layer_order_pos++) \
{ \
current_layer = layer_order[layer_order_pos]; \
if(!(current_layer & 0x04) && ((1 << current_layer) & enable_flags)) \
{ \
layer_renderers[current_layer]. \
tile_alpha##_render_transparent(current_layer, start, end, dest); \
} \
} \
} \
else \
{ \
/* Find the first active layer, skip all of the inactive ones */ \
while(!((current_layer & 0x04) || ((1 << current_layer) & enable_flags))) \
{ \
layer_order_pos++; \
current_layer = layer_order[layer_order_pos]; \
\
/* Oops, ran out of layers, render the background. */ \
if(layer_order_pos == layer_count) \
{ \
fill_line_bg(tile_alpha, dest, start, end); \
goto skip; \
} \
} \
\
if(current_layer & 0x04) \
{ \
/* If the first one is OBJ render the background then render it. */ \
fill_line_bg(tile_alpha, dest, start, end); \
render_obj_layer(obj_alpha, dest, start, end); \
} \
else \
{ \
/* Otherwise render a base layer. */ \
layer_renderers[current_layer]. \
tile_alpha##_render_base(current_layer, start, end, dest); \
} \
\
layer_order_pos++; \
\
/* Render the rest of the layers. */ \
for(; layer_order_pos < layer_count; layer_order_pos++) \
{ \
current_layer = layer_order[layer_order_pos]; \
if(current_layer & 0x04) \
{ \
render_obj_layer(obj_alpha, dest, start, end); \
} \
else \
{ \
if(enable_flags & (1 << current_layer)) \
{ \
layer_renderers[current_layer]. \
tile_alpha##_render_transparent(current_layer, start, end, dest); \
} \
} \
} \
} \
\
skip: \
; \
} \
// Render all of the BG and OBJ in a tiled scanline from start to end ONLY if
// enable_flags allows that layer/OBJ. Also conditionally render color effects.
void render_scanline_conditional_tile(u32 start, u32 end, u16 *scanline,
u32 enable_flags, u32 dispcnt, u32 bldcnt, tile_layer_render_struct
*layer_renderers)
{
u32 current_layer;
u32 layer_order_pos = 0;
render_layers_color_effect(render_layers_conditional,
(layer_count && (enable_flags & 0x1F)),
((enable_flags & 0x20) && render_condition_alpha),
((enable_flags & 0x20) && render_condition_fade), start, end);
}
// Render the BG and OBJ in a bitmap scanline from start to end ONLY if
// enable_flags allows that layer/OBJ. Also conditionally render color effects.
void render_scanline_conditional_bitmap(u32 start, u32 end, u16 *scanline,
u32 enable_flags, u32 dispcnt, u32 bldcnt, bitmap_layer_render_struct
*layer_renderers)
{
u32 current_layer;
u32 layer_order_pos;
fill_line_bg(normal, scanline, start, end);
for(layer_order_pos = 0; layer_order_pos < layer_count; layer_order_pos++)
{
current_layer = layer_order[layer_order_pos];
if(current_layer & 0x04)
{
if(enable_flags & 0x10)
{
render_obj_layer(normal, scanline, start, end);
}
}
else
{
if(enable_flags & 0x04)
layer_renderers->normal_render(start, end, scanline);
}
}
}
#define window_x_coords(window_number) \
window_##window_number##_x1 = \
io_registers[REG_WIN##window_number##H] >> 8; \
window_##window_number##_x2 = \
io_registers[REG_WIN##window_number##H] & 0xFF; \
window_##window_number##_enable = \
(winin >> (window_number * 8)) & 0x3F; \
\
if(window_##window_number##_x1 > 240) \
window_##window_number##_x1 = 240; \
\
if(window_##window_number##_x2 > 240) \
window_##window_number##_x2 = 240 \
#define window_coords(window_number) \
u32 window_##window_number##_x1, window_##window_number##_x2; \
u32 window_##window_number##_y1, window_##window_number##_y2; \
u32 window_##window_number##_enable; \
window_##window_number##_y1 = \
io_registers[REG_WIN##window_number##V] >> 8; \
window_##window_number##_y2 = \
io_registers[REG_WIN##window_number##V] & 0xFF; \
\
if(window_##window_number##_y1 > window_##window_number##_y2) \
{ \
if((((vcount <= window_##window_number##_y2) || \
(vcount > window_##window_number##_y1)) || \
(window_##window_number##_y2 > 227)) && \
(window_##window_number##_y1 <= 227)) \
{ \
window_x_coords(window_number); \
} \
else \
{ \
window_##window_number##_x1 = 240; \
window_##window_number##_x2 = 240; \
} \
} \
else \
{ \
if((((vcount >= window_##window_number##_y1) && \
(vcount < window_##window_number##_y2)) || \
(window_##window_number##_y2 > 227)) && \
(window_##window_number##_y1 <= 227)) \
{ \
window_x_coords(window_number); \
} \
else \
{ \
window_##window_number##_x1 = 240; \
window_##window_number##_x2 = 240; \
} \
} \
#define render_window_segment(type, start, end, window_type) \
if(start != end) \
{ \
render_scanline_conditional_##type(start, end, scanline, \
window_##window_type##_enable, dispcnt, bldcnt, layer_renderers); \
} \
#define render_window_segment_unequal(type, start, end, window_type) \
render_scanline_conditional_##type(start, end, scanline, \
window_##window_type##_enable, dispcnt, bldcnt, layer_renderers) \
#define render_window_segment_clip(type, clip_start, clip_end, start, end, \
window_type) \
{ \
if(start != end) \
{ \
if(start < clip_start) \
{ \
if(end > clip_start) \
{ \
if(end > clip_end) \
{ \
render_window_segment_unequal(type, clip_start, clip_end, \
window_type); \
} \
else \
{ \
render_window_segment_unequal(type, clip_start, end, window_type); \
} \
} \
} \
else \
\
if(end > clip_end) \
{ \
if(start < clip_end) \
render_window_segment_unequal(type, start, clip_end, window_type); \
} \
else \
{ \
render_window_segment_unequal(type, start, end, window_type); \
} \
} \
} \
#define render_window_clip_1(type, start, end) \
if(window_1_x1 != 240) \
{ \
if(window_1_x1 > window_1_x2) \
{ \
render_window_segment_clip(type, start, end, 0, window_1_x2, 1); \
render_window_segment_clip(type, start, end, window_1_x2, window_1_x1, \
out); \
render_window_segment_clip(type, start, end, window_1_x1, 240, 1); \
} \
else \
{ \
render_window_segment_clip(type, start, end, 0, window_1_x1, out); \
render_window_segment_clip(type, start, end, window_1_x1, window_1_x2, \
1); \
render_window_segment_clip(type, start, end, window_1_x2, 240, out); \
} \
} \
else \
{ \
render_window_segment(type, start, end, out); \
} \
#define render_window_clip_obj(type, start, end) \
render_window_segment(type, start, end, out); \
if(dispcnt & 0x40) \
render_scanline_obj_copy_##type##_1D(4, start, end, scanline); \
else \
render_scanline_obj_copy_##type##_2D(4, start, end, scanline) \
#define render_window_segment_clip_obj(type, clip_start, clip_end, start, \
end) \
{ \
if(start != end) \
{ \
if(start < clip_start) \
{ \
if(end > clip_start) \
{ \
if(end > clip_end) \
{ \
render_window_clip_obj(type, clip_start, clip_end); \
} \
else \
{ \
render_window_clip_obj(type, clip_start, end); \
} \
} \
} \
else \
\
if(end > clip_end) \
{ \
if(start < clip_end) \
{ \
render_window_clip_obj(type, start, clip_end); \
} \
} \
else \
{ \
render_window_clip_obj(type, start, end); \
} \
} \
} \
#define render_window_clip_1_obj(type, start, end) \
if(window_1_x1 != 240) \
{ \
if(window_1_x1 > window_1_x2) \
{ \
render_window_segment_clip(type, start, end, 0, window_1_x2, 1); \
render_window_segment_clip_obj(type, start, end, window_1_x2, \
window_1_x1); \
render_window_segment_clip(type, start, end, window_1_x1, 240, 1); \
} \
else \
{ \
render_window_segment_clip_obj(type, start, end, 0, window_1_x1); \
render_window_segment_clip(type, start, end, window_1_x1, window_1_x2, \
1); \
render_window_segment_clip_obj(type, start, end, window_1_x2, 240); \
} \
} \
else \
{ \
render_window_clip_obj(type, start, end); \
} \
#define render_window_single(type, window_number) \
u32 winin = io_registers[REG_WININ]; \
window_coords(window_number); \
if(window_##window_number##_x1 > window_##window_number##_x2) \
{ \
render_window_segment(type, 0, window_##window_number##_x2, \
window_number); \
render_window_segment(type, window_##window_number##_x2, \
window_##window_number##_x1, out); \
render_window_segment(type, window_##window_number##_x1, 240, \
window_number); \
} \
else \
{ \
render_window_segment(type, 0, window_##window_number##_x1, out); \
render_window_segment(type, window_##window_number##_x1, \
window_##window_number##_x2, window_number); \
render_window_segment(type, window_##window_number##_x2, 240, out); \
} \
#define render_window_multi(type, front, back) \
if(window_##front##_x1 > window_##front##_x2) \
{ \
render_window_segment(type, 0, window_##front##_x2, front); \
render_window_clip_##back(type, window_##front##_x2, \
window_##front##_x1); \
render_window_segment(type, window_##front##_x1, 240, front); \
} \
else \
{ \
render_window_clip_##back(type, 0, window_##front##_x1); \
render_window_segment(type, window_##front##_x1, window_##front##_x2, \
front); \
render_window_clip_##back(type, window_##front##_x2, 240); \
} \
#define render_scanline_window_builder(type) \
void render_scanline_window_##type(u16 *scanline, u32 dispcnt) \
{ \
u32 vcount = io_registers[REG_VCOUNT]; \
u32 winout = io_registers[REG_WINOUT]; \
u32 bldcnt = io_registers[REG_BLDCNT]; \
u32 window_out_enable = winout & 0x3F; \
\
render_scanline_layer_functions_##type(); \
\
switch(dispcnt >> 13) \
{ \
/* Just window 0 */ \
case 0x01: \
{ \
render_window_single(type, 0); \
break; \
} \
\
/* Just window 1 */ \
case 0x02: \
{ \
render_window_single(type, 1); \
break; \
} \
\
/* Windows 0 and 1 */ \
case 0x03: \
{ \
u32 winin = io_registers[REG_WININ]; \
window_coords(0); \
window_coords(1); \
render_window_multi(type, 0, 1); \
break; \
} \
\
/* Just the OBJ window */ \
case 0x04: \
{ \
u32 window_obj_enable = winout >> 8; \
render_window_clip_obj(type, 0, 240); \
break; \
} \
\
/* Window 0 and OBJ window */ \
case 0x05: \
{ \
u32 window_obj_enable = winout >> 8; \
u32 winin = io_registers[REG_WININ]; \
window_coords(0); \
render_window_multi(type, 0, obj); \
break; \
} \
\
/* Window 1 and OBJ window */ \
case 0x06: \
{ \
u32 window_obj_enable = winout >> 8; \
u32 winin = io_registers[REG_WININ]; \
window_coords(1); \
render_window_multi(type, 1, obj); \
break; \
} \
\
/* Window 0, 1, and OBJ window */ \
case 0x07: \
{ \
u32 window_obj_enable = winout >> 8; \
u32 winin = io_registers[REG_WININ]; \
window_coords(0); \
window_coords(1); \
render_window_multi(type, 0, 1_obj); \
break; \
} \
} \
} \
render_scanline_window_builder(tile);
render_scanline_window_builder(bitmap);
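/* The builder above expands into the two window-aware entry points used by
   update_scanline() below: render_scanline_window_tile() for the tiled video
   modes (0-2) and render_scanline_window_bitmap() for the bitmap modes (3-5).
   The dispcnt >> 13 switch keys off DISPCNT bits 13-15, which enable window 0,
   window 1, and the OBJ window respectively. */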
u32 active_layers[6] = { 0x1F, 0x17, 0x1C, 0x14, 0x14, 0x14 };
u32 small_resolution_width = 240;
u32 small_resolution_height = 160;
u32 resolution_width, resolution_height;
void update_scanline()
{
u32 pitch = get_screen_pitch();
u32 dispcnt = io_registers[REG_DISPCNT];
u32 display_flags = (dispcnt >> 8) & 0x1F;
u32 vcount = io_registers[REG_VCOUNT];
u16 *screen_offset = get_screen_pixels() + (vcount * pitch);
u32 video_mode = dispcnt & 0x07;
u32 current_layer;
// If OAM has been modified since the last scanline was updated, reorder
// and reprofile the OBJ lists.
if(oam_update)
{
order_obj(video_mode);
oam_update = 0;
}
order_layers((dispcnt >> 8) & active_layers[video_mode]);
if(skip_next_frame)
return;
// If the screen is in forced blank, draw pure white.
if(dispcnt & 0x80)
{
fill_line_color16(0xFFFF, screen_offset, 0, 240);
}
else
{
if(video_mode < 3)
{
if(dispcnt >> 13)
{
render_scanline_window_tile(screen_offset, dispcnt);
}
else
{
render_scanline_tile(screen_offset, dispcnt);
}
}
else
{
if(dispcnt >> 13)
render_scanline_window_bitmap(screen_offset, dispcnt);
else
render_scanline_bitmap(screen_offset, dispcnt);
}
}
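// Advance the affine background reference points by their per-scanline
// deltas: REG_BG2PB/REG_BG2PD (and the BG3 equivalents) hold dmx/dmy, which
// the hardware adds to the internal reference registers once per line.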
affine_reference_x[0] += (s16)io_registers[REG_BG2PB];
affine_reference_y[0] += (s16)io_registers[REG_BG2PD];
affine_reference_x[1] += (s16)io_registers[REG_BG3PB];
affine_reference_y[1] += (s16)io_registers[REG_BG3PD];
}
#ifdef PSP_BUILD
u32 screen_flip = 0;
void flip_screen()
{
if(video_direct == 0)
{
u32 *old_ge_cmd_ptr = ge_cmd_ptr;
sceKernelDcacheWritebackAll();
// Render the current screen
ge_cmd_ptr = ge_cmd + 2;
GE_CMD(TBP0, ((u32)screen_pixels & 0x00FFFFFF));
GE_CMD(TBW0, (((u32)screen_pixels & 0xFF000000) >> 8) |
GBA_SCREEN_WIDTH);
ge_cmd_ptr = old_ge_cmd_ptr;
sceGeListEnQueue(ge_cmd, ge_cmd_ptr, gecbid, NULL);
// Flip to the next screen
screen_flip ^= 1;
if(screen_flip)
screen_pixels = screen_texture + (240 * 160 * 2);
else
screen_pixels = screen_texture;
}
}
#else
#define integer_scale_copy_2() \
current_scanline_ptr[x2] = current_pixel; \
current_scanline_ptr[x2 - 1] = current_pixel; \
x2 -= 2 \
#define integer_scale_copy_3() \
current_scanline_ptr[x2] = current_pixel; \
current_scanline_ptr[x2 - 1] = current_pixel; \
current_scanline_ptr[x2 - 2] = current_pixel; \
x2 -= 3 \
#define integer_scale_copy_4() \
current_scanline_ptr[x2] = current_pixel; \
current_scanline_ptr[x2 - 1] = current_pixel; \
current_scanline_ptr[x2 - 2] = current_pixel; \
current_scanline_ptr[x2 - 3] = current_pixel; \
x2 -= 4 \
#define integer_scale_horizontal(scale_factor) \
for(y = 0; y < 160; y++) \
{ \
for(x = 239, x2 = (240 * video_scale) - 1; x >= 0; x--) \
{ \
current_pixel = current_scanline_ptr[x]; \
integer_scale_copy_##scale_factor(); \
current_scanline_ptr[x2] = current_scanline_ptr[x]; \
current_scanline_ptr[x2 - 1] = current_scanline_ptr[x]; \
current_scanline_ptr[x2 - 2] = current_scanline_ptr[x]; \
} \
current_scanline_ptr += pitch; \
} \
void flip_screen()
{
if((video_scale != 1) && (current_scale != unscaled))
{
s32 x, y;
s32 x2, y2;
u16 *screen_ptr = get_screen_pixels();
u16 *current_scanline_ptr = screen_ptr;
u32 pitch = get_screen_pitch();
u16 current_pixel;
u32 i;
switch(video_scale)
{
case 2:
integer_scale_horizontal(2);
break;
case 3:
integer_scale_horizontal(3);
break;
default:
case 4:
integer_scale_horizontal(4);
break;
}
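// The switch above widened each row in place; now replicate every source row
// video_scale times, working bottom-up so a row is copied before it is
// overwritten. 480 * video_scale bytes == 240 * video_scale 16-bit pixels.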
for(y = 159, y2 = (160 * video_scale) - 1; y >= 0; y--)
{
for(i = 0; i < video_scale; i++)
{
memcpy(screen_ptr + (y2 * pitch),
screen_ptr + (y * pitch), 480 * video_scale);
y2--;
}
}
}
SDL_Flip(screen);
}
#endif
u32 frame_to_render;
void update_screen()
{
if(!skip_next_frame)
flip_screen();
}
#ifdef PSP_BUILD
void init_video()
{
sceDisplaySetMode(0, PSP_SCREEN_WIDTH, PSP_SCREEN_HEIGHT);
sceDisplayWaitVblankStart();
sceDisplaySetFrameBuf((void*)psp_gu_vram_base, PSP_LINE_SIZE,
PSP_DISPLAY_PIXEL_FORMAT_565, PSP_DISPLAY_SETBUF_NEXTFRAME);
sceGuInit();
sceGuStart(GU_DIRECT, display_list);
sceGuDrawBuffer(GU_PSM_5650, (void*)0, PSP_LINE_SIZE);
sceGuDispBuffer(PSP_SCREEN_WIDTH, PSP_SCREEN_HEIGHT,
(void*)0, PSP_LINE_SIZE);
sceGuClear(GU_COLOR_BUFFER_BIT);
sceGuOffset(2048 - (PSP_SCREEN_WIDTH / 2), 2048 - (PSP_SCREEN_HEIGHT / 2));
sceGuViewport(2048, 2048, PSP_SCREEN_WIDTH, PSP_SCREEN_HEIGHT);
sceGuScissor(0, 0, PSP_SCREEN_WIDTH + 1, PSP_SCREEN_HEIGHT + 1);
sceGuEnable(GU_SCISSOR_TEST);
sceGuTexMode(GU_PSM_5650, 0, 0, GU_FALSE);
sceGuTexFunc(GU_TFX_REPLACE, GU_TCC_RGBA);
sceGuTexFilter(GU_LINEAR, GU_LINEAR);
sceGuEnable(GU_TEXTURE_2D);
sceGuFrontFace(GU_CW);
sceGuDisable(GU_BLEND);
sceGuFinish();
sceGuSync(0, 0);
sceDisplayWaitVblankStart();
sceGuDisplay(GU_TRUE);
PspGeCallbackData gecb;
gecb.signal_func = NULL;
gecb.signal_arg = NULL;
gecb.finish_func = Ge_Finish_Callback;
gecb.finish_arg = NULL;
gecbid = sceGeSetCallback(&gecb);
screen_vertex[0] = 0 + 0.5;
screen_vertex[1] = 0 + 0.5;
screen_vertex[2] = 0 + 0.5;
screen_vertex[3] = 0 + 0.5;
screen_vertex[4] = 0;
screen_vertex[5] = GBA_SCREEN_WIDTH - 0.5;
screen_vertex[6] = GBA_SCREEN_HEIGHT - 0.5;
screen_vertex[7] = PSP_SCREEN_WIDTH - 0.5;
screen_vertex[8] = PSP_SCREEN_HEIGHT - 0.5;
screen_vertex[9] = 0;
// Set framebuffer to PSP VRAM
GE_CMD(FBP, ((u32)psp_gu_vram_base & 0x00FFFFFF));
GE_CMD(FBW, (((u32)psp_gu_vram_base & 0xFF000000) >> 8) | PSP_LINE_SIZE);
// Set texture 0 to the screen texture
GE_CMD(TBP0, ((u32)screen_texture & 0x00FFFFFF));
GE_CMD(TBW0, (((u32)screen_texture & 0xFF000000) >> 8) | GBA_SCREEN_WIDTH);
// Set the texture size to 256 by 256 (2^8 by 2^8)
GE_CMD(TSIZE0, (8 << 8) | 8);
// Flush the texture cache
GE_CMD(TFLUSH, 0);
// Use 2D coordinates, no indices, no weights, 32bit float positions,
// 32bit float texture coordinates
GE_CMD(VTYPE, (1 << 23) | (0 << 11) | (0 << 9) |
(3 << 7) | (0 << 5) | (0 << 2) | 3);
// Set the base of the index list pointer to 0
GE_CMD(BASE, 0);
// Set the rest of index list pointer to 0 (not being used)
GE_CMD(IADDR, 0);
// Set the base of the screen vertex list pointer
GE_CMD(BASE, ((u32)screen_vertex & 0xFF000000) >> 8);
// Set the rest of the screen vertex list pointer
GE_CMD(VADDR, ((u32)screen_vertex & 0x00FFFFFF));
// Primitive kick: render sprite (primitive 6), 2 vertices
GE_CMD(PRIM, (6 << 16) | 2);
// Done with commands
GE_CMD(FINISH, 0);
// Raise signal interrupt
GE_CMD(SIGNAL, 0);
GE_CMD(NOP, 0);
GE_CMD(NOP, 0);
}
#else
void init_video()
{
SDL_Init(SDL_INIT_VIDEO | SDL_INIT_JOYSTICK | SDL_INIT_NOPARACHUTE);
screen = SDL_SetVideoMode(240 * video_scale, 160 * video_scale, 16, 0);
SDL_ShowCursor(0);
}
#endif
video_scale_type screen_scale = scaled_aspect;
video_scale_type current_scale = scaled_aspect;
video_filter_type screen_filter = filter_bilinear;
#ifdef PSP_BUILD
void video_resolution_large()
{
if(video_direct != 1)
{
video_direct = 1;
screen_pixels = psp_gu_vram_base;
screen_pitch = 512;
sceGuStart(GU_DIRECT, display_list);
sceGuDispBuffer(PSP_SCREEN_WIDTH, PSP_SCREEN_HEIGHT,
(void*)0, PSP_LINE_SIZE);
sceGuFinish();
}
}
void set_gba_resolution(video_scale_type scale)
{
u32 filter_linear = 0;
screen_scale = scale;
switch(scale)
{
case unscaled:
screen_vertex[2] = 120 + 0.5;
screen_vertex[3] = 56 + 0.5;
screen_vertex[7] = GBA_SCREEN_WIDTH + 120 - 0.5;
screen_vertex[8] = GBA_SCREEN_HEIGHT + 56 - 0.5;
break;
case scaled_aspect:
screen_vertex[2] = 36 + 0.5;
screen_vertex[3] = 0 + 0.5;
screen_vertex[7] = 408 + 36 - 0.5;
screen_vertex[8] = PSP_SCREEN_HEIGHT - 0.5;
break;
case fullscreen:
screen_vertex[2] = 0;
screen_vertex[3] = 0;
screen_vertex[7] = PSP_SCREEN_WIDTH;
screen_vertex[8] = PSP_SCREEN_HEIGHT;
break;
}
sceGuStart(GU_DIRECT, display_list);
if(screen_filter == filter_bilinear)
sceGuTexFilter(GU_LINEAR, GU_LINEAR);
else
sceGuTexFilter(GU_NEAREST, GU_NEAREST);
sceGuFinish();
sceGuSync(0, 0);
clear_screen(0x0000);
}
void video_resolution_small()
{
if(video_direct != 0)
{
set_gba_resolution(screen_scale);
video_direct = 0;
screen_pixels = screen_texture;
screen_flip = 0;
screen_pitch = 240;
sceGuStart(GU_DIRECT, display_list);
sceGuDispBuffer(PSP_SCREEN_WIDTH, PSP_SCREEN_HEIGHT,
(void*)0, PSP_LINE_SIZE);
sceGuFinish();
}
}
void clear_screen(u16 color)
{
u32 i;
u16 *src_ptr = get_screen_pixels();
sceGuSync(0, 0);
for(i = 0; i < (512 * 272); i++, src_ptr++)
{
*src_ptr = color;
}
// I don't know why this doesn't work.
/* color = (((color & 0x1F) * 255 / 31) << 0) |
((((color >> 5) & 0x3F) * 255 / 63) << 8) |
((((color >> 11) & 0x1F) * 255 / 31) << 16) | (0xFF << 24);
sceGuStart(GU_DIRECT, display_list);
sceGuDrawBuffer(GU_PSM_5650, (void*)0, PSP_LINE_SIZE);
//sceGuDispBuffer(PSP_SCREEN_WIDTH, PSP_SCREEN_HEIGHT,
// (void*)0, PSP_LINE_SIZE);
sceGuClearColor(color);
sceGuClear(GU_COLOR_BUFFER_BIT);
sceGuFinish();
sceGuSync(0, 0); */
}
#else
void video_resolution_large()
{
if(current_scale != unscaled)
{
current_scale = unscaled;
screen = SDL_SetVideoMode(480, 272, 16, 0);
resolution_width = 480;
resolution_height = 272;
}
}
void video_resolution_small()
{
if(current_scale != screen_scale)
{
current_scale = screen_scale;
screen = SDL_SetVideoMode(small_resolution_width * video_scale,
small_resolution_height * video_scale, 16, 0);
resolution_width = small_resolution_width;
resolution_height = small_resolution_height;
}
}
void set_gba_resolution(video_scale_type scale)
{
if(screen_scale != scale)
{
screen_scale = scale;
switch(scale)
{
case unscaled:
case scaled_aspect:
case fullscreen:
small_resolution_width = 240 * video_scale;
small_resolution_height = 160 * video_scale;
break;
}
}
}
void clear_screen(u16 color)
{
u16 *dest_ptr = get_screen_pixels();
u32 line_skip = get_screen_pitch() - screen->w;
u32 x, y;
for(y = 0; y < screen->h; y++)
{
for(x = 0; x < screen->w; x++, dest_ptr++)
{
*dest_ptr = color;
}
dest_ptr += line_skip;
}
}
#endif
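// Return a malloc()'d copy of the first 240 * 160 pixels of the screen
// buffer; the caller owns the returned pointer and should free() it.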
u16 *copy_screen()
{
u16 *copy = malloc(240 * 160 * 2);
memcpy(copy, get_screen_pixels(), 240 * 160 * 2);
return copy;
}
void blit_to_screen(u16 *src, u32 w, u32 h, u32 dest_x, u32 dest_y)
{
u32 pitch = get_screen_pitch();
u16 *dest_ptr = get_screen_pixels() + dest_x + (dest_y * pitch);
u16 *src_ptr = src;
u32 line_skip = pitch - w;
u32 x, y;
for(y = 0; y < h; y++)
{
for(x = 0; x < w; x++, src_ptr++, dest_ptr++)
{
*dest_ptr = *src_ptr;
}
dest_ptr += line_skip;
}
}
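// Draw a nul-terminated string with the built-in fixed-width font at (x, y)
// into _dest_ptr. '\n' moves to the next text row; if pad is non-zero the
// output is padded with trailing spaces up to pad glyphs, which blanks out a
// longer string drawn there previously.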
void print_string_ext(const char *str, u16 fg_color, u16 bg_color,
u32 x, u32 y, void *_dest_ptr, u32 pitch, u32 pad)
{
u16 *dest_ptr = (u16 *)_dest_ptr + (y * pitch) + x;
u8 current_char = str[0];
u32 current_row;
u32 glyph_offset;
u32 i = 0, i2, i3;
u32 str_index = 1;
u32 current_x = x;
while(current_char)
{
if(current_char == '\n')
{
y += FONT_HEIGHT;
current_x = x;
dest_ptr = get_screen_pixels() + (y * pitch) + x;
}
else
{
glyph_offset = _font_offset[current_char];
current_x += FONT_WIDTH;
for(i2 = 0; i2 < FONT_HEIGHT; i2++, glyph_offset++)
{
current_row = _font_bits[glyph_offset];
for(i3 = 0; i3 < FONT_WIDTH; i3++)
{
if((current_row >> (15 - i3)) & 0x01)
*dest_ptr = fg_color;
else
*dest_ptr = bg_color;
dest_ptr++;
}
dest_ptr += (pitch - FONT_WIDTH);
}
dest_ptr = dest_ptr - (pitch * FONT_HEIGHT) + FONT_WIDTH;
}
i++;
current_char = str[str_index];
if((i < pad) && (current_char == 0))
{
current_char = ' ';
}
else
{
str_index++;
}
if(current_x >= 480)
break;
}
}
void print_string(const char *str, u16 fg_color, u16 bg_color,
u32 x, u32 y)
{
print_string_ext(str, fg_color, bg_color, x, y, get_screen_pixels(),
get_screen_pitch(), 0);
}
void print_string_pad(const char *str, u16 fg_color, u16 bg_color,
u32 x, u32 y, u32 pad)
{
print_string_ext(str, fg_color, bg_color, x, y, get_screen_pixels(),
get_screen_pitch(), pad);
}
#define video_savestate_builder(type) \
void video_##type##_savestate(file_tag_type savestate_file) \
{ \
file_##type##_array(savestate_file, affine_reference_x); \
file_##type##_array(savestate_file, affine_reference_y); \
} \
video_savestate_builder(read);
video_savestate_builder(write_mem);
| BASLQC/gPSP | video.c | C | gpl-2.0 | 204,850 |
/*
* linux/kernel/exit.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <litmus/litmus.h>
extern void exit_od_table(struct task_struct *t);
static void exit_mm(struct task_struct * tsk);
static void __unhash_process(struct task_struct *p, bool group_dead)
{
nr_threads--;
detach_pid(p, PIDTYPE_PID);
if (group_dead) {
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
list_del_rcu(&p->tasks);
list_del_init(&p->sibling);
__this_cpu_dec(process_counts);
}
list_del_rcu(&p->thread_group);
}
/*
* This function expects the tasklist_lock write-locked.
*/
static void __exit_signal(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
bool group_dead = thread_group_leader(tsk);
struct sighand_struct *sighand;
struct tty_struct *uninitialized_var(tty);
cputime_t utime, stime;
sighand = rcu_dereference_check(tsk->sighand,
lockdep_tasklist_lock_is_held());
spin_lock(&sighand->siglock);
posix_cpu_timers_exit(tsk);
if (group_dead) {
posix_cpu_timers_exit_group(tsk);
tty = sig->tty;
sig->tty = NULL;
} else {
/*
* This can only happen if the caller is de_thread().
* FIXME: this is the temporary hack, we should teach
* posix-cpu-timers to handle this case correctly.
*/
if (unlikely(has_group_leader_pid(tsk)))
posix_cpu_timers_exit_group(tsk);
/*
* If there is any task waiting for the group exit
* then notify it:
*/
if (sig->notify_count > 0 && !--sig->notify_count)
wake_up_process(sig->group_exit_task);
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
/*
* Accumulate here the counters for all threads but the
* group leader as they die, so they can be added into
* the process-wide totals when those are taken.
* The group leader stays around as a zombie as long
* as there are other threads. When it gets reaped,
* the exit.c code will add its counts into these totals.
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
task_cputime(tsk, &utime, &stime);
sig->utime += utime;
sig->stime += stime;
sig->gtime += task_gtime(tsk);
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
sig->nivcsw += tsk->nivcsw;
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
}
sig->nr_threads--;
__unhash_process(tsk, group_dead);
/*
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
flush_sigqueue(&tsk->pending);
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
__cleanup_sighand(sighand);
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
if (group_dead) {
flush_sigqueue(&sig->shared_pending);
tty_kref_put(tty);
}
}
static void delayed_put_task_struct(struct rcu_head *rhp)
{
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
perf_event_delayed_put(tsk);
trace_sched_process_free(tsk);
put_task_struct(tsk);
}
void release_task(struct task_struct * p)
{
struct task_struct *leader;
int zap_leader;
repeat:
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
rcu_read_lock();
atomic_dec(&__task_cred(p)->user->processes);
rcu_read_unlock();
proc_flush_task(p);
write_lock_irq(&tasklist_lock);
ptrace_release_task(p);
__exit_signal(p);
/*
* If we are the last non-leader member of the thread
* group, and the leader is zombie, then notify the
* group leader's parent process. (if it wants notification.)
*/
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
* then we are the one who should release the leader.
*/
zap_leader = do_notify_parent(leader, leader->exit_signal);
if (zap_leader)
leader->exit_state = EXIT_DEAD;
}
write_unlock_irq(&tasklist_lock);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
p = leader;
if (unlikely(zap_leader))
goto repeat;
}
/*
* This checks not only the pgrp, but falls back on the pid if no
* satisfactory pgrp is found. I dunno - gdb doesn't work correctly
* without this...
*
* The caller must hold rcu lock or the tasklist lock.
*/
struct pid *session_of_pgrp(struct pid *pgrp)
{
struct task_struct *p;
struct pid *sid = NULL;
p = pid_task(pgrp, PIDTYPE_PGID);
if (p == NULL)
p = pid_task(pgrp, PIDTYPE_PID);
if (p != NULL)
sid = task_session(p);
return sid;
}
/*
* Determine if a process group is "orphaned", according to the POSIX
* definition in 2.2.2.52. Orphaned process groups are not to be affected
* by terminal-generated stop signals. Newly orphaned process groups are
* to receive a SIGHUP and a SIGCONT.
*
* "I ask you, have you ever known what it is to be an orphan?"
*/
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
if ((p == ignored_task) ||
(p->exit_state && thread_group_empty(p)) ||
is_global_init(p->real_parent))
continue;
if (task_pgrp(p->real_parent) != pgrp &&
task_session(p->real_parent) == task_session(p))
return 0;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
return 1;
}
int is_current_pgrp_orphaned(void)
{
int retval;
read_lock(&tasklist_lock);
retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
read_unlock(&tasklist_lock);
return retval;
}
static bool has_stopped_jobs(struct pid *pgrp)
{
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
if (p->signal->flags & SIGNAL_STOP_STOPPED)
return true;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
return false;
}
/*
* Check to see if any process groups have become orphaned as
* a result of our exiting, and if they have any stopped jobs,
* send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
struct pid *pgrp = task_pgrp(tsk);
struct task_struct *ignored_task = tsk;
if (!parent)
/* exit: our father is in a different pgrp than
* we are and we were the only connection outside.
*/
parent = tsk->real_parent;
else
/* reparent: our child is in a different pgrp than
* we are, and it was the only connection outside.
*/
ignored_task = NULL;
if (task_pgrp(parent) != pgrp &&
task_session(parent) == task_session(tsk) &&
will_become_orphaned_pgrp(pgrp, ignored_task) &&
has_stopped_jobs(pgrp)) {
__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
}
}
void __set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
if (task_session(curr) != pid)
change_pid(curr, PIDTYPE_SID, pid);
if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
}
/*
* Let kernel threads use this to say that they allow a certain signal.
* Must not be used if kthread was cloned with CLONE_SIGHAND.
*/
int allow_signal(int sig)
{
if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(&current->sighand->siglock);
/* This is only needed for daemonize()'ed kthreads */
sigdelset(&current->blocked, sig);
/*
* Kernel threads handle their own signals. Let the signal code
* know it'll be handled, so that they don't get converted to
* SIGKILL or just silently dropped.
*/
current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
EXPORT_SYMBOL(allow_signal);
int disallow_signal(int sig)
{
if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(&current->sighand->siglock);
current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
EXPORT_SYMBOL(disallow_signal);
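/*
 * Illustrative usage (not part of this file): a kernel thread that wants to
 * react to a signal typically does
 *
 *	allow_signal(SIGTERM);
 *	...
 *	if (signal_pending(current))
 *		do_cleanup_and_stop();	/* hypothetical helper */
 *
 * and can later go back to ignoring it with disallow_signal(SIGTERM).
 */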
#ifdef CONFIG_MM_OWNER
/*
* A task is exiting. If it owned this mm, find a new owner for the mm.
*/
void mm_update_next_owner(struct mm_struct *mm)
{
struct task_struct *c, *g, *p = current;
retry:
/*
* If the exiting or execing task is not the owner, it's
* someone else's problem.
*/
if (mm->owner != p)
return;
/*
* The current owner is exiting/execing and there are no other
* candidates. Do not leave the mm pointing to a possibly
* freed task structure.
*/
if (atomic_read(&mm->mm_users) <= 1) {
mm->owner = NULL;
return;
}
read_lock(&tasklist_lock);
/*
* Search in the children
*/
list_for_each_entry(c, &p->children, sibling) {
if (c->mm == mm)
goto assign_new_owner;
}
/*
* Search in the siblings
*/
list_for_each_entry(c, &p->real_parent->children, sibling) {
if (c->mm == mm)
goto assign_new_owner;
}
/*
* Search through everything else. We should not get
* here often
*/
do_each_thread(g, c) {
if (c->mm == mm)
goto assign_new_owner;
} while_each_thread(g, c);
read_unlock(&tasklist_lock);
/*
* We found no owner yet mm_users > 1: this implies that we are
* most likely racing with swapoff (try_to_unuse()) or /proc or
* ptrace or page migration (get_task_mm()). Mark owner as NULL.
*/
mm->owner = NULL;
return;
assign_new_owner:
BUG_ON(c == p);
get_task_struct(c);
/*
* The task_lock protects c->mm from changing.
* We always want mm->owner->mm == mm
*/
task_lock(c);
/*
* Delay read_unlock() till we have the task_lock()
* to ensure that c does not slip away underneath us
*/
read_unlock(&tasklist_lock);
if (c->mm != mm) {
task_unlock(c);
put_task_struct(c);
goto retry;
}
mm->owner = c;
task_unlock(c);
put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
/*
* Turn us into a lazy TLB process if we
* aren't already..
*/
static void exit_mm(struct task_struct * tsk)
{
struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
mm_release(tsk, mm);
if (!mm)
return;
sync_mm_rss(mm);
/*
* Serialize with any possible pending coredump.
* We must hold mmap_sem around checking core_state
* and clearing tsk->mm. The core-inducing thread
* will increment ->nr_threads for each thread in the
* group with ->mm != NULL.
*/
down_read(&mm->mmap_sem);
core_state = mm->core_state;
if (core_state) {
struct core_thread self;
up_read(&mm->mmap_sem);
self.task = tsk;
self.next = xchg(&core_state->dumper.next, &self);
/*
* Implies mb(), the result of xchg() must be visible
* to core_state->dumper.
*/
if (atomic_dec_and_test(&core_state->nr_threads))
complete(&core_state->startup);
for (;;) {
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (!self.task) /* see coredump_finish() */
break;
freezable_schedule();
}
__set_task_state(tsk, TASK_RUNNING);
down_read(&mm->mmap_sem);
}
atomic_inc(&mm->mm_count);
BUG_ON(mm != tsk->active_mm);
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
up_read(&mm->mmap_sem);
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
}
/*
* When we die, we re-parent all our children, and try to:
* 1. give them to another thread in our thread group, if such a member exists
 * 2. give them to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give them to the init process (PID 1) in our pid namespace
*/
static struct task_struct *find_new_reaper(struct task_struct *father)
__releases(&tasklist_lock)
__acquires(&tasklist_lock)
{
struct pid_namespace *pid_ns = task_active_pid_ns(father);
struct task_struct *thread;
thread = father;
while_each_thread(father, thread) {
if (thread->flags & PF_EXITING)
continue;
if (unlikely(pid_ns->child_reaper == father))
pid_ns->child_reaper = thread;
return thread;
}
if (unlikely(pid_ns->child_reaper == father)) {
write_unlock_irq(&tasklist_lock);
if (unlikely(pid_ns == &init_pid_ns)) {
panic("Attempted to kill init! exitcode=0x%08x\n",
father->signal->group_exit_code ?:
father->exit_code);
}
zap_pid_ns_processes(pid_ns);
write_lock_irq(&tasklist_lock);
} else if (father->signal->has_child_subreaper) {
struct task_struct *reaper;
/*
* Find the first ancestor marked as child_subreaper.
* Note that the code below checks same_thread_group(reaper,
* pid_ns->child_reaper). This is what we need to DTRT in a
* PID namespace. However we still need the check above, see
* http://marc.info/?l=linux-kernel&m=131385460420380
*/
for (reaper = father->real_parent;
reaper != &init_task;
reaper = reaper->real_parent) {
if (same_thread_group(reaper, pid_ns->child_reaper))
break;
if (!reaper->signal->is_child_subreaper)
continue;
thread = reaper;
do {
if (!(thread->flags & PF_EXITING))
return reaper;
} while_each_thread(reaper, thread);
}
}
return pid_ns->child_reaper;
}
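/*
 * Illustrative userspace counterpart to case 2 above (assumption, not part
 * of this kernel tree): a service manager marks itself as a subreaper with
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
 *
 * after which its orphaned descendants are reparented to it rather than to
 * the pid namespace's init.
 */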
/*
* Any that need to be release_task'd are put on the @dead list.
*/
static void reparent_leader(struct task_struct *father, struct task_struct *p,
struct list_head *dead)
{
list_move_tail(&p->sibling, &p->real_parent->children);
/*
* If this is a threaded reparent there is no need to
* notify anyone anything has happened.
*/
if (same_thread_group(p->real_parent, father))
return;
/*
* We don't want people slaying init.
*
* Note: we do this even if it is EXIT_DEAD, wait_task_zombie()
* can change ->exit_state to EXIT_ZOMBIE. If this is the final
* state, do_notify_parent() was already called and ->exit_signal
* doesn't matter.
*/
p->exit_signal = SIGCHLD;
if (p->exit_state == EXIT_DEAD)
return;
/* If it has exited notify the new parent about this child's death. */
if (!p->ptrace &&
p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
if (do_notify_parent(p, p->exit_signal)) {
p->exit_state = EXIT_DEAD;
list_move_tail(&p->sibling, dead);
}
}
kill_orphaned_pgrp(p, father);
}
static void forget_original_parent(struct task_struct *father)
{
struct task_struct *p, *n, *reaper;
LIST_HEAD(dead_children);
write_lock_irq(&tasklist_lock);
/*
* Note that exit_ptrace() and find_new_reaper() might
* drop tasklist_lock and reacquire it.
*/
exit_ptrace(father);
reaper = find_new_reaper(father);
list_for_each_entry_safe(p, n, &father->children, sibling) {
struct task_struct *t = p;
do {
t->real_parent = reaper;
if (t->parent == father) {
BUG_ON(t->ptrace);
t->parent = t->real_parent;
}
if (t->pdeath_signal)
group_send_sig_info(t->pdeath_signal,
SEND_SIG_NOINFO, t);
} while_each_thread(p, t);
reparent_leader(father, p, &dead_children);
}
write_unlock_irq(&tasklist_lock);
BUG_ON(!list_empty(&father->children));
list_for_each_entry_safe(p, n, &dead_children, sibling) {
list_del_init(&p->sibling);
release_task(p);
}
}
/*
* Send signals to all our closest relatives so that they know
* to properly mourn us..
*/
static void exit_notify(struct task_struct *tsk, int group_dead)
{
bool autoreap;
/*
* This does two things:
*
* A. Make init inherit all the child processes
* B. Check to see if any process groups have become orphaned
* as a result of our exiting, and if they have any stopped
* jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
forget_original_parent(tsk);
write_lock_irq(&tasklist_lock);
if (group_dead)
kill_orphaned_pgrp(tsk->group_leader, NULL);
if (unlikely(tsk->ptrace)) {
int sig = thread_group_leader(tsk) &&
thread_group_empty(tsk) &&
!ptrace_reparented(tsk) ?
tsk->exit_signal : SIGCHLD;
autoreap = do_notify_parent(tsk, sig);
} else if (thread_group_leader(tsk)) {
autoreap = thread_group_empty(tsk) &&
do_notify_parent(tsk, tsk->exit_signal);
} else {
autoreap = true;
}
tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
/* mt-exec, de_thread() is waiting for group leader */
if (unlikely(tsk->signal->notify_count < 0))
wake_up_process(tsk->signal->group_exit_task);
write_unlock_irq(&tasklist_lock);
/* If the process is dead, release it - nobody will wait for it */
if (autoreap)
release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
static DEFINE_SPINLOCK(low_water_lock);
static int lowest_to_date = THREAD_SIZE;
unsigned long free;
free = stack_not_used(current);
if (free >= lowest_to_date)
return;
spin_lock(&low_water_lock);
if (free < lowest_to_date) {
printk(KERN_WARNING "%s (%d) used greatest stack depth: "
"%lu bytes left\n",
current->comm, task_pid_nr(current), free);
lowest_to_date = free;
}
spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
void do_exit(long code)
{
struct task_struct *tsk = current;
int group_dead;
profile_task_exit(tsk);
WARN_ON(blk_needs_flush_plug(tsk));
if (unlikely(in_interrupt()))
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
if (unlikely(is_realtime(tsk))) {
/* We would like the task to be polite
* and transition out of RT mode first.
* Let's give it a little help.
*/
litmus_do_exit(tsk);
BUG_ON(is_realtime(tsk));
}
/*
* If do_exit is called because this processes oopsed, it's possible
* that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
* continuing. Amongst other possible reasons, this is to prevent
* mm_release()->clear_child_tid() from writing to a user-controlled
* kernel address.
*/
set_fs(USER_DS);
ptrace_event(PTRACE_EVENT_EXIT, code);
validate_creds_for_do_exit(tsk);
/*
* We're taking recursive faults here in do_exit. Safest is to just
* leave this task alone and wait for reboot.
*/
if (unlikely(tsk->flags & PF_EXITING)) {
printk(KERN_ALERT
"Fixing recursive fault but reboot is needed!\n");
/*
* We can do this unlocked here. The futex code uses
* this flag just to verify whether the pi state
* cleanup has been done or not. In the worst case it
* loops once more. We pretend that the cleanup was
* done as there is no way to return. Either the
* OWNER_DIED bit is set by now or we push the blocked
 * task into the wait-forever nirvana as well.
*/
tsk->flags |= PF_EXITPIDONE;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
exit_signals(tsk); /* sets PF_EXITING */
/*
* tsk->flags are checked in the futex code to protect against
* an exiting task cleaning up the robust pi futexes.
*/
smp_mb();
raw_spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, task_pid_nr(current),
preempt_count());
acct_update_integrals(tsk);
/* sync mm's RSS info before statistics gathering */
if (tsk->mm)
sync_mm_rss(tsk->mm);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
if (tsk->mm)
setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
}
acct_collect(code, group_dead);
if (group_dead)
tty_audit_exit();
audit_free(tsk);
exit_od_table(tsk);
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
exit_mm(tsk);
if (group_dead)
acct_process();
trace_sched_process_exit(tsk);
exit_sem(tsk);
exit_shm(tsk);
exit_files(tsk);
exit_fs(tsk);
if (group_dead)
disassociate_ctty(1);
exit_task_namespaces(tsk);
exit_task_work(tsk);
check_stack_usage();
exit_thread();
/*
* Flush inherited counters to the parent - before the parent
* gets woken up by child-exit notifications.
*
* because of cgroup mode, must be called before cgroup_exit()
*/
perf_event_exit_task(tsk);
cgroup_exit(tsk, 1);
module_put(task_thread_info(tsk)->exec_domain->module);
proc_exit_connector(tsk);
/*
* FIXME: do that only when needed, using sched_exit tracepoint
*/
ptrace_put_breakpoints(tsk);
exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
task_lock(tsk);
mpol_put(tsk->mempolicy);
tsk->mempolicy = NULL;
task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
#endif
/*
* Make sure we are holding no locks:
*/
debug_check_no_locks_held(tsk);
/*
* We can do this unlocked here. The futex code uses this flag
* just to verify whether the pi state cleanup has been done
* or not. In the worst case it loops once more.
*/
tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
exit_io_context(tsk);
if (tsk->splice_pipe)
free_pipe_info(tsk->splice_pipe);
if (tsk->task_frag.page)
put_page(tsk->task_frag.page);
validate_creds_for_do_exit(tsk);
preempt_disable();
if (tsk->nr_dirtied)
__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
exit_rcu();
/*
* The setting of TASK_RUNNING by try_to_wake_up() may be delayed
* when the following two conditions become true.
 * - There is a race condition on mmap_sem (it is acquired by
 *   exit_mm()), and
 * - SMI occurs before setting TASK_RUNNING.
* (or hypervisor of virtual machine switches to other guest)
* As a result, we may become TASK_RUNNING after becoming TASK_DEAD
*
* To avoid it, we have to wait for releasing tsk->pi_lock which
* is held by try_to_wake_up()
*/
smp_mb();
raw_spin_unlock_wait(&tsk->pi_lock);
/* causes final put_task_struct in finish_task_switch(). */
tsk->state = TASK_DEAD;
tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
schedule();
BUG();
/* Avoid "noreturn function does return". */
for (;;)
cpu_relax(); /* For when BUG is null */
}
EXPORT_SYMBOL_GPL(do_exit);
void complete_and_exit(struct completion *comp, long code)
{
if (comp)
complete(comp);
do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);
SYSCALL_DEFINE1(exit, int, error_code)
{
do_exit((error_code&0xff)<<8);
}
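/*
 * The (error_code & 0xff) << 8 packing above places the exit status in
 * bits 8-15 of the wait status, the same layout WEXITSTATUS() decodes;
 * the low byte stays clear because it reports the terminating signal.
 */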
/*
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
*/
void
do_group_exit(int exit_code)
{
struct signal_struct *sig = current->signal;
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
if (signal_group_exit(sig))
exit_code = sig->group_exit_code;
else if (!thread_group_empty(current)) {
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
if (signal_group_exit(sig))
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit_code = exit_code;
sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
}
do_exit(exit_code);
/* NOTREACHED */
}
/*
* this kills every thread in the thread group. Note that any externally
* wait4()-ing process will get the correct exit code - even if this
* thread is not the thread group leader.
*/
SYSCALL_DEFINE1(exit_group, int, error_code)
{
do_group_exit((error_code & 0xff) << 8);
/* NOTREACHED */
return 0;
}
struct wait_opts {
enum pid_type wo_type;
int wo_flags;
struct pid *wo_pid;
struct siginfo __user *wo_info;
int __user *wo_stat;
struct rusage __user *wo_rusage;
wait_queue_t child_wait;
int notask_error;
};
static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
if (type != PIDTYPE_PID)
task = task->group_leader;
return task->pids[type].pid;
}
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
return wo->wo_type == PIDTYPE_MAX ||
task_pid_type(p, wo->wo_type) == wo->wo_pid;
}
static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
if (!eligible_pid(wo, p))
return 0;
/* Wait for all children (clone and not) if __WALL is set;
* otherwise, wait for clone children *only* if __WCLONE is
* set; otherwise, wait for non-clone children *only*. (Note:
* A "clone" child here is one that reports to its parent
* using a signal other than SIGCHLD.) */
if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
&& !(wo->wo_flags & __WALL))
return 0;
return 1;
}
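/*
 * Example (illustrative): a child created with clone() using an exit signal
 * other than SIGCHLD is a "clone" child here, so a plain wait4(..., 0, ...)
 * skips it; the caller must pass __WCLONE or __WALL to reap it.
 */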
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
pid_t pid, uid_t uid, int why, int status)
{
struct siginfo __user *infop;
int retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
put_task_struct(p);
infop = wo->wo_info;
if (infop) {
if (!retval)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval)
retval = put_user(0, &infop->si_errno);
if (!retval)
retval = put_user((short)why, &infop->si_code);
if (!retval)
retval = put_user(pid, &infop->si_pid);
if (!retval)
retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = put_user(status, &infop->si_status);
}
if (!retval)
retval = pid;
return retval;
}
/*
* Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
unsigned long state;
int retval, status, traced;
pid_t pid = task_pid_vnr(p);
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
struct siginfo __user *infop;
if (!likely(wo->wo_flags & WEXITED))
return 0;
if (unlikely(wo->wo_flags & WNOWAIT)) {
int exit_code = p->exit_code;
int why;
get_task_struct(p);
read_unlock(&tasklist_lock);
if ((exit_code & 0x7f) == 0) {
why = CLD_EXITED;
status = exit_code >> 8;
} else {
why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
status = exit_code & 0x7f;
}
return wait_noreap_copyout(wo, p, pid, uid, why, status);
}
/*
* Try to move the task's state to DEAD
* only one thread is allowed to do this:
*/
state = xchg(&p->exit_state, EXIT_DEAD);
if (state != EXIT_ZOMBIE) {
BUG_ON(state != EXIT_DEAD);
return 0;
}
traced = ptrace_reparented(p);
/*
* It can be ptraced but not reparented, check
* thread_group_leader() to filter out sub-threads.
*/
if (likely(!traced) && thread_group_leader(p)) {
struct signal_struct *psig;
struct signal_struct *sig;
unsigned long maxrss;
cputime_t tgutime, tgstime;
/*
* The resource counters for the group leader are in its
* own task_struct. Those for dead threads in the group
* are in its signal_struct, as are those for the child
* processes it has previously reaped. All these
* accumulate in the parent's signal_struct c* fields.
*
* We don't bother to take a lock here to protect these
* p->signal fields, because they are only touched by
* __exit_signal, which runs with tasklist_lock
* write-locked anyway, and so is excluded here. We do
* need to protect the access to parent->signal fields,
* as other threads in the parent group can be right
* here reaping other children at the same time.
*
* We use thread_group_cputime_adjusted() to get times for the thread
* group, which consolidates times for all threads in the
* group including the group leader.
*/
thread_group_cputime_adjusted(p, &tgutime, &tgstime);
spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal;
sig = p->signal;
psig->cutime += tgutime + sig->cutime;
psig->cstime += tgstime + sig->cstime;
psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
psig->cmin_flt +=
p->min_flt + sig->min_flt + sig->cmin_flt;
psig->cmaj_flt +=
p->maj_flt + sig->maj_flt + sig->cmaj_flt;
psig->cnvcsw +=
p->nvcsw + sig->nvcsw + sig->cnvcsw;
psig->cnivcsw +=
p->nivcsw + sig->nivcsw + sig->cnivcsw;
psig->cinblock +=
task_io_get_inblock(p) +
sig->inblock + sig->cinblock;
psig->coublock +=
task_io_get_oublock(p) +
sig->oublock + sig->coublock;
maxrss = max(sig->maxrss, sig->cmaxrss);
if (psig->cmaxrss < maxrss)
psig->cmaxrss = maxrss;
task_io_accounting_add(&psig->ioac, &p->ioac);
task_io_accounting_add(&psig->ioac, &sig->ioac);
spin_unlock_irq(&p->real_parent->sighand->siglock);
}
/*
* Now we are sure this task is interesting, and no other
* thread can reap it because we set its state to EXIT_DEAD.
*/
read_unlock(&tasklist_lock);
retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
status = (p->signal->flags & SIGNAL_GROUP_EXIT)
? p->signal->group_exit_code : p->exit_code;
if (!retval && wo->wo_stat)
retval = put_user(status, wo->wo_stat);
infop = wo->wo_info;
if (!retval && infop)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval && infop)
retval = put_user(0, &infop->si_errno);
if (!retval && infop) {
int why;
if ((status & 0x7f) == 0) {
why = CLD_EXITED;
status >>= 8;
} else {
why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
status &= 0x7f;
}
retval = put_user((short)why, &infop->si_code);
if (!retval)
retval = put_user(status, &infop->si_status);
}
if (!retval && infop)
retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = pid;
if (traced) {
write_lock_irq(&tasklist_lock);
/* We dropped tasklist, ptracer could die and untrace */
ptrace_unlink(p);
/*
* If this is not a sub-thread, notify the parent.
* If parent wants a zombie, don't release it now.
*/
if (thread_group_leader(p) &&
!do_notify_parent(p, p->exit_signal)) {
p->exit_state = EXIT_ZOMBIE;
p = NULL;
}
write_unlock_irq(&tasklist_lock);
}
if (p != NULL)
release_task(p);
return retval;
}
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
if (ptrace) {
if (task_is_stopped_or_traced(p) &&
!(p->jobctl & JOBCTL_LISTENING))
return &p->exit_code;
} else {
if (p->signal->flags & SIGNAL_STOP_STOPPED)
return &p->signal->group_exit_code;
}
return NULL;
}
/**
* wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
* @wo: wait options
* @ptrace: is the wait for ptrace
* @p: task to wait for
*
* Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
*
* CONTEXT:
* read_lock(&tasklist_lock), which is released if return value is
* non-zero. Also, grabs and releases @p->sighand->siglock.
*
* RETURNS:
* 0 if wait condition didn't exist and search for other wait conditions
* should continue. Non-zero return, -errno on failure and @p's pid on
* success, implies that tasklist_lock is released and wait condition
* search should terminate.
*/
static int wait_task_stopped(struct wait_opts *wo,
int ptrace, struct task_struct *p)
{
struct siginfo __user *infop;
int retval, exit_code, *p_code, why;
uid_t uid = 0; /* unneeded, required by compiler */
pid_t pid;
/*
* Traditionally we see ptrace'd stopped tasks regardless of options.
*/
if (!ptrace && !(wo->wo_flags & WUNTRACED))
return 0;
if (!task_stopped_code(p, ptrace))
return 0;
exit_code = 0;
spin_lock_irq(&p->sighand->siglock);
p_code = task_stopped_code(p, ptrace);
if (unlikely(!p_code))
goto unlock_sig;
exit_code = *p_code;
if (!exit_code)
goto unlock_sig;
if (!unlikely(wo->wo_flags & WNOWAIT))
*p_code = 0;
uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
spin_unlock_irq(&p->sighand->siglock);
if (!exit_code)
return 0;
/*
* Now we are pretty sure this task is interesting.
* Make sure it doesn't get reaped out from under us while we
* give up the lock and then examine it below. We don't want to
* keep holding onto the tasklist_lock while we call getrusage and
* possibly take page faults for user memory.
*/
get_task_struct(p);
pid = task_pid_vnr(p);
why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
read_unlock(&tasklist_lock);
if (unlikely(wo->wo_flags & WNOWAIT))
return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);
retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
if (!retval && wo->wo_stat)
retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);
infop = wo->wo_info;
if (!retval && infop)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval && infop)
retval = put_user(0, &infop->si_errno);
if (!retval && infop)
retval = put_user((short)why, &infop->si_code);
if (!retval && infop)
retval = put_user(exit_code, &infop->si_status);
if (!retval && infop)
retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = pid;
put_task_struct(p);
BUG_ON(!retval);
return retval;
}
/*
* Handle do_wait work for one task in a live, non-stopped state.
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
int retval;
pid_t pid;
uid_t uid;
if (!unlikely(wo->wo_flags & WCONTINUED))
return 0;
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
return 0;
spin_lock_irq(&p->sighand->siglock);
/* Re-check with the lock held. */
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
spin_unlock_irq(&p->sighand->siglock);
return 0;
}
if (!unlikely(wo->wo_flags & WNOWAIT))
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
uid = from_kuid_munged(current_user_ns(), task_uid(p));
spin_unlock_irq(&p->sighand->siglock);
pid = task_pid_vnr(p);
get_task_struct(p);
read_unlock(&tasklist_lock);
if (!wo->wo_info) {
retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
put_task_struct(p);
if (!retval && wo->wo_stat)
retval = put_user(0xffff, wo->wo_stat);
if (!retval)
retval = pid;
} else {
retval = wait_noreap_copyout(wo, p, pid, uid,
CLD_CONTINUED, SIGCONT);
BUG_ON(retval == 0);
}
return retval;
}
/*
* Consider @p for a wait by @parent.
*
* -ECHILD should be in ->notask_error before the first call.
* Returns nonzero for a final return, when we have unlocked tasklist_lock.
* Returns zero if the search for a child should continue;
* then ->notask_error is 0 if @p is an eligible child,
* or another error from security_task_wait(), or still -ECHILD.
*/
static int wait_consider_task(struct wait_opts *wo, int ptrace,
struct task_struct *p)
{
int ret = eligible_child(wo, p);
if (!ret)
return ret;
ret = security_task_wait(p);
if (unlikely(ret < 0)) {
/*
* If we have not yet seen any eligible child,
* then let this error code replace -ECHILD.
* A permission error will give the user a clue
* to look for security policy problems, rather
* than for mysterious wait bugs.
*/
if (wo->notask_error)
wo->notask_error = ret;
return 0;
}
/* dead body doesn't have much to contribute */
if (unlikely(p->exit_state == EXIT_DEAD)) {
/*
* But do not ignore this task until the tracer does
* wait_task_zombie()->do_notify_parent().
*/
if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
wo->notask_error = 0;
return 0;
}
/* slay zombie? */
if (p->exit_state == EXIT_ZOMBIE) {
/*
* A zombie ptracee is only visible to its ptracer.
* Notification and reaping will be cascaded to the real
* parent when the ptracer detaches.
*/
if (likely(!ptrace) && unlikely(p->ptrace)) {
/* it will become visible, clear notask_error */
wo->notask_error = 0;
return 0;
}
/* we don't reap group leaders with subthreads */
if (!delay_group_leader(p))
return wait_task_zombie(wo, p);
/*
* Allow access to stopped/continued state via zombie by
* falling through. Clearing of notask_error is complex.
*
* When !@ptrace:
*
* If WEXITED is set, notask_error should naturally be
* cleared. If not, subset of WSTOPPED|WCONTINUED is set,
* so, if there are live subthreads, there are events to
* wait for. If all subthreads are dead, it's still safe
* to clear - this function will be called again in finite
* amount time once all the subthreads are released and
* will then return without clearing.
*
* When @ptrace:
*
* Stopped state is per-task and thus can't change once the
* target task dies. Only continued and exited can happen.
* Clear notask_error if WCONTINUED | WEXITED.
*/
if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
wo->notask_error = 0;
} else {
/*
* If @p is ptraced by a task in its real parent's group,
* hide group stop/continued state when looking at @p as
* the real parent; otherwise, a single stop can be
* reported twice as group and ptrace stops.
*
* If a ptracer wants to distinguish the two events for its
* own children, it should create a separate process which
* takes the role of real parent.
*/
if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
return 0;
/*
* @p is alive and it's gonna stop, continue or exit, so
* there always is something to wait for.
*/
wo->notask_error = 0;
}
/*
* Wait for stopped. Depending on @ptrace, different stopped state
* is used and the two don't interact with each other.
*/
ret = wait_task_stopped(wo, ptrace, p);
if (ret)
return ret;
/*
* Wait for continued. There's only one continued state and the
* ptracer can consume it which can confuse the real parent. Don't
* use WCONTINUED from ptracer. You don't need or want it.
*/
return wait_task_continued(wo, p);
}
/*
* Do the work of do_wait() for one thread in the group, @tsk.
*
* -ECHILD should be in ->notask_error before the first call.
* Returns nonzero for a final return, when we have unlocked tasklist_lock.
* Returns zero if the search for a child should continue; then
* ->notask_error is 0 if there were any eligible children,
* or another error from security_task_wait(), or still -ECHILD.
*/
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
struct task_struct *p;
list_for_each_entry(p, &tsk->children, sibling) {
int ret = wait_consider_task(wo, 0, p);
if (ret)
return ret;
}
return 0;
}
static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
struct task_struct *p;
list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
int ret = wait_consider_task(wo, 1, p);
if (ret)
return ret;
}
return 0;
}
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
int sync, void *key)
{
struct wait_opts *wo = container_of(wait, struct wait_opts,
child_wait);
struct task_struct *p = key;
if (!eligible_pid(wo, p))
return 0;
if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
return 0;
return default_wake_function(wait, mode, sync, key);
}
void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
__wake_up_sync_key(&parent->signal->wait_chldexit,
TASK_INTERRUPTIBLE, 1, p);
}
static long do_wait(struct wait_opts *wo)
{
struct task_struct *tsk;
int retval;
trace_sched_process_wait(wo->wo_pid);
init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
wo->child_wait.private = current;
add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
/*
 * If there is nothing that can match our criteria, just get out.
* We will clear ->notask_error to zero if we see any child that
* might later match our criteria, even if we are not able to reap
* it yet.
*/
wo->notask_error = -ECHILD;
if ((wo->wo_type < PIDTYPE_MAX) &&
(!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
goto notask;
set_current_state(TASK_INTERRUPTIBLE);
read_lock(&tasklist_lock);
tsk = current;
do {
retval = do_wait_thread(wo, tsk);
if (retval)
goto end;
retval = ptrace_do_wait(wo, tsk);
if (retval)
goto end;
if (wo->wo_flags & __WNOTHREAD)
break;
} while_each_thread(current, tsk);
read_unlock(&tasklist_lock);
notask:
retval = wo->notask_error;
if (!retval && !(wo->wo_flags & WNOHANG)) {
retval = -ERESTARTSYS;
if (!signal_pending(current)) {
schedule();
goto repeat;
}
}
end:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
return retval;
}
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
infop, int, options, struct rusage __user *, ru)
{
struct wait_opts wo;
struct pid *pid = NULL;
enum pid_type type;
long ret;
if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
return -EINVAL;
if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
return -EINVAL;
switch (which) {
case P_ALL:
type = PIDTYPE_MAX;
break;
case P_PID:
type = PIDTYPE_PID;
if (upid <= 0)
return -EINVAL;
break;
case P_PGID:
type = PIDTYPE_PGID;
if (upid <= 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
if (type < PIDTYPE_MAX)
pid = find_get_pid(upid);
wo.wo_type = type;
wo.wo_pid = pid;
wo.wo_flags = options;
wo.wo_info = infop;
wo.wo_stat = NULL;
wo.wo_rusage = ru;
ret = do_wait(&wo);
if (ret > 0) {
ret = 0;
} else if (infop) {
/*
* For a WNOHANG return, clear out all the fields
* we would set so the user can easily tell the
* difference.
*/
if (!ret)
ret = put_user(0, &infop->si_signo);
if (!ret)
ret = put_user(0, &infop->si_errno);
if (!ret)
ret = put_user(0, &infop->si_code);
if (!ret)
ret = put_user(0, &infop->si_pid);
if (!ret)
ret = put_user(0, &infop->si_uid);
if (!ret)
ret = put_user(0, &infop->si_status);
}
put_pid(pid);
return ret;
}
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
int, options, struct rusage __user *, ru)
{
struct wait_opts wo;
struct pid *pid = NULL;
enum pid_type type;
long ret;
if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
if (upid == -1)
type = PIDTYPE_MAX;
else if (upid < 0) {
type = PIDTYPE_PGID;
pid = find_get_pid(-upid);
} else if (upid == 0) {
type = PIDTYPE_PGID;
pid = get_task_pid(current, PIDTYPE_PGID);
} else /* upid > 0 */ {
type = PIDTYPE_PID;
pid = find_get_pid(upid);
}
wo.wo_type = type;
wo.wo_pid = pid;
wo.wo_flags = options | WEXITED;
wo.wo_info = NULL;
wo.wo_stat = stat_addr;
wo.wo_rusage = ru;
ret = do_wait(&wo);
put_pid(pid);
return ret;
}
#ifdef __ARCH_WANT_SYS_WAITPID
/*
* sys_waitpid() remains for compatibility. waitpid() should be
* implemented by calling sys_wait4() from libc.a.
*/
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
return sys_wait4(pid, stat_addr, options, NULL);
}
#endif
| PennPanda/litmus-rt | kernel/exit.c | C | gpl-2.0 | 44,381 |
//=============================================================================
//
// m25pxx.c
//
// SPI flash driver for Numonyx M25Pxx devices and compatibles.
//
//=============================================================================
// ####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later
// version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License
// along with eCos; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
// As a special exception, if other files instantiate templates or use
// macros or inline functions from this file, or you compile this file
// and link it with other works to produce a work based on this file,
// this file does not by itself cause the resulting work to be covered by
// the GNU General Public License. However the source code for this file
// must still be made available in accordance with section (3) of the GNU
// General Public License v2.
//
// This exception does not invalidate any other reasons why a work based
// on this file might be covered by the GNU General Public License.
// -------------------------------------------
// ####ECOSGPLCOPYRIGHTEND####
//=============================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s): Chris Holgate
// Date: 2008-12-22
// Purpose: Numonyx M25Pxx SPI flash driver implementation
//
//####DESCRIPTIONEND####
//
//=============================================================================
#include <cyg/io/spi.h>
#include <cyg/io/flash.h>
#include <cyg/io/flash_dev.h>
#include <cyg/infra/cyg_type.h>
#include <cyg/infra/cyg_ass.h>
#include <pkgconf/devs_flash_spi_m25pxx.h>
#include <string.h>
//-----------------------------------------------------------------------------
// Enable polled SPI operation for non-kernel builds.
#ifdef CYGPKG_KERNEL
#define M25PXX_POLLED false
#else
#define M25PXX_POLLED true
#endif
//-----------------------------------------------------------------------------
// Implement delay functions for kernel and non-kernel builds. The kernel
// build assumes that the API calls are made in the thread context.
#ifdef CYGPKG_KERNEL
#define M25PXX_DELAY_MS(_msdelay_) cyg_thread_delay (\
1 + ((1000 * _msdelay_ * CYGNUM_HAL_RTC_DENOMINATOR) / (CYGNUM_HAL_RTC_NUMERATOR / 1000)))
#else
#define M25PXX_DELAY_MS(_msdelay_) CYGACC_CALL_IF_DELAY_US (_msdelay_ * 1000)
#endif
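//-----------------------------------------------------------------------------
// Worked example (assuming the common eCos default 100 Hz system clock, i.e.
// CYGNUM_HAL_RTC_NUMERATOR = 1000000000 and CYGNUM_HAL_RTC_DENOMINATOR = 100):
// M25PXX_DELAY_MS (500) in the kernel variant evaluates to
//   1 + ((1000 * 500 * 100) / (1000000000 / 1000)) = 1 + 50 = 51 ticks,
// roughly 510 ms, so the delay is deliberately rounded up rather than down.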
//-----------------------------------------------------------------------------
// Maintenance and debug macros.
#define TODO_M25P(_msg_) CYG_ASSERT(false, "TODO (M25P) : " _msg_)
#define FAIL_M25P(_msg_) CYG_ASSERT(false, "FAIL (M25P) : " _msg_)
#define ASSERT_M25P(_test_, _msg_) CYG_ASSERT(_test_, "FAIL (M25P) : " _msg_)
#define TRACE_M25P(_msg_, _args_...) if (dev->pf) dev->pf ("M25PXX : " _msg_, ##_args_)
//=============================================================================
// Define M25Pxx SPI protocol.
//=============================================================================
typedef enum m25pxx_cmd {
M25PXX_CMD_WREN = 0x06, // Write enable.
M25PXX_CMD_WDRI = 0x04, // Write disable.
M25PXX_CMD_RDID = 0x9F, // Read identification.
M25PXX_CMD_RDSR = 0x05, // Read status register.
M25PXX_CMD_WRSR = 0x01, // Write status register.
M25PXX_CMD_READ = 0x03, // Read data.
M25PXX_CMD_FREAD = 0x0B, // Read data (fast transaction).
M25PXX_CMD_PP = 0x02, // Page program.
M25PXX_CMD_SE = 0x20, //0xD8, // Sector erase. // Modified for MX25L1606E by reille 2013.05.26
M25PXX_CMD_BE = 0xC7, // Bulk erase.
M25PXX_CMD_RES = 0xAB, // Read electronic signature.
} m25pxx_cmd;
// Status register bitfields.
#define M25PXX_STATUS_WIP 0x01 /* Write in progress. */
#define M25PXX_STATUS_WEL 0x02 /* Write enable latch. */
#define M25PXX_STATUS_BP0 0x04 /* Block protect 0. */
#define M25PXX_STATUS_BP1 0x08 /* Block protect 1. */
#define M25PXX_STATUS_BP2 0x10 /* Block protect 2. */
#define M25PXX_STATUS_SRWD 0x80 /* Status register write protect. */
// Page size of 256 bytes appears to be common for all devices.
#define M25PXX_PAGE_SIZE 256
//=============================================================================
// Array containing a list of supported devices. This allows the device
// parameters to be dynamically detected on initialisation.
//=============================================================================
typedef struct m25pxx_params {
cyg_uint16 sector_size; // Number of pages in a sector.
cyg_uint16 sector_count; // Number of sectors on device.
cyg_uint32 jedec_id; // 3 byte JEDEC identifier for this device.
} m25pxx_params;
static const m25pxx_params m25pxx_supported_devices [] = {
{ // Support for Numonyx 128 MBit devices.
sector_size : 1024,
sector_count : 64,
jedec_id : 0x00202018
},
{ // Support for Numonyx 64 MBit devices.
sector_size : 256,
sector_count : 128,
jedec_id : 0x00202017
},
{ // Support for Numonyx 16 MBit devices.
sector_size : 256,
sector_count : 64,
jedec_id : 0x00202016
},
{ // Support for Numonyx 16 MBit devices.
sector_size : 16, // 256, // Modified for MX25L1606E by reille 2013.05.26
sector_count : 512, // 32, // Modified for MX25L1606E by reille 2013.05.26
jedec_id : 0x00C22015, // 0x00202015 // Modified for MX25L1606E by reille 2013.05.26
},
{ // Support for Numonyx 8 MBit devices.
sector_size : 256,
sector_count : 16,
jedec_id : 0x00202014
},
{ // Support for Numonyx 4 MBit devices.
sector_size : 256,
sector_count : 8,
jedec_id : 0x00202013
},
{ // Support for Numonyx 2 MBit devices.
sector_size : 256,
sector_count : 4,
jedec_id : 0x00202012
},
{ // Support for Numonyx 1 MBit devices.
sector_size : 128,
sector_count : 4,
jedec_id : 0x00202011
},
{ // Support for Numonyx 512 KBit devices.
sector_size : 128,
sector_count : 2,
jedec_id : 0x00202010
},
{ // Null terminating entry.
sector_size : 0,
sector_count : 0,
jedec_id : 0
}
};
//=============================================================================
// Utility functions for address calculations.
//=============================================================================
//-----------------------------------------------------------------------------
// Strips out any device address offset to give address within device.
static cyg_bool m25pxx_to_local_addr
(struct cyg_flash_dev* dev, cyg_flashaddr_t* addr)
{
cyg_bool retval = false;
// Range check address before modifying it.
if ((*addr >= dev->start) && (*addr <= dev->end)) {
*addr -= dev->start;
retval = true;
}
return retval;
}
//=============================================================================
// Wrapper functions for various SPI transactions.
//=============================================================================
//-----------------------------------------------------------------------------
// Read back the 3-byte JEDEC ID, returning it as a 32-bit integer.
// This function is called during flash initialisation, which can often be
// called from the startup/idle thread. This means that we should always use
// SPI polled mode in order to prevent the thread from attempting to sleep.
static inline cyg_uint32 m25pxx_spi_rdid
(struct cyg_flash_dev *dev)
{
cyg_spi_device* spi_device = (cyg_spi_device*) dev->priv;
const cyg_uint8 tx_buf [4] = { M25PXX_CMD_RDID, 0, 0, 0 };
cyg_uint8 rx_buf [4];
cyg_uint32 retval = 0;
// Carry out SPI transfer.
cyg_spi_transfer (spi_device, true, 4, tx_buf, rx_buf);
// Convert 3-byte ID to 32-bit integer.
retval |= ((cyg_uint32) rx_buf[1]) << 16;
retval |= ((cyg_uint32) rx_buf[2]) << 8;
retval |= ((cyg_uint32) rx_buf[3]);
return retval;
}
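// Example: a Numonyx 64 MBit part answers the RDID command with the bytes
// 0x20 0x20 0x17 in rx_buf[1..3], which the shifts above assemble into the
// 32-bit value 0x00202017 used to look the device up in the table above.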
//-----------------------------------------------------------------------------
// Send write enable command.
static inline void m25pxx_spi_wren
(struct cyg_flash_dev *dev)
{
cyg_spi_device* spi_device = (cyg_spi_device*) dev->priv;
const cyg_uint8 tx_buf [1] = { M25PXX_CMD_WREN };
cyg_spi_transfer (spi_device, M25PXX_POLLED, 1, tx_buf, NULL);
}
//-----------------------------------------------------------------------------
// Send sector erase command. The address parameter is a device local address
// within the sector to be erased.
static inline void m25pxx_spi_se
(struct cyg_flash_dev *dev, cyg_flashaddr_t addr)
{
cyg_spi_device* spi_device = (cyg_spi_device*) dev->priv;
const cyg_uint8 tx_buf [4] = { M25PXX_CMD_SE,
(cyg_uint8) (addr >> 16), (cyg_uint8) (addr >> 8), (cyg_uint8) (addr) };
cyg_spi_transfer (spi_device, M25PXX_POLLED, 4, tx_buf, NULL);
}
//-----------------------------------------------------------------------------
// Read and return the 8-bit device status register.
static inline cyg_uint8 m25pxx_spi_rdsr
(struct cyg_flash_dev *dev)
{
cyg_spi_device* spi_device = (cyg_spi_device*) dev->priv;
const cyg_uint8 tx_buf [2] = { M25PXX_CMD_RDSR, 0 };
cyg_uint8 rx_buf [2];
// Carry out SPI transfer and return the status byte.
cyg_spi_transfer (spi_device, M25PXX_POLLED, 2, tx_buf, rx_buf);
return rx_buf [1];
}
//-----------------------------------------------------------------------------
// Program a single page.
static inline void m25pxx_spi_pp
(struct cyg_flash_dev *dev, cyg_flashaddr_t addr, cyg_uint8* wbuf, cyg_uint32 wbuf_len)
{
cyg_spi_device* spi_device = (cyg_spi_device*) dev->priv;
const cyg_uint8 tx_buf [4] = { M25PXX_CMD_PP,
(cyg_uint8) (addr >> 16), (cyg_uint8) (addr >> 8), (cyg_uint8) (addr) };
// Implement the program operation as a multistage SPI transaction.
cyg_spi_transaction_begin (spi_device);
cyg_spi_transaction_transfer (spi_device, M25PXX_POLLED, 4, tx_buf, NULL, false);
cyg_spi_transaction_transfer (spi_device, M25PXX_POLLED, wbuf_len, wbuf, NULL, false);
cyg_spi_transaction_end (spi_device);
}
//-----------------------------------------------------------------------------
// Implement fast reads to the specified buffer.
static inline void m25pxx_spi_fread
(struct cyg_flash_dev *dev, cyg_flashaddr_t addr, cyg_uint8* rbuf, cyg_uint32 rbuf_len)
{
cyg_spi_device* spi_device = (cyg_spi_device*) dev->priv;
const cyg_uint8 tx_buf [5] = { M25PXX_CMD_FREAD,
(cyg_uint8) (addr >> 16), (cyg_uint8) (addr >> 8), (cyg_uint8) (addr), 0 };
// Implement the read operation as a multistage SPI transaction.
cyg_spi_transaction_begin (spi_device);
cyg_spi_transaction_transfer (spi_device, M25PXX_POLLED, 5, tx_buf, NULL, false);
cyg_spi_transaction_transfer (spi_device, M25PXX_POLLED, rbuf_len, NULL, rbuf, false);
cyg_spi_transaction_end (spi_device);
}
//=============================================================================
// Standard Flash device API. All the following functions assume that a valid
// SPI device handle is passed in the 'priv' reference of the flash device
// data structure.
//=============================================================================
//-----------------------------------------------------------------------------
// Initialise the SPI flash, reading back the flash parameters.
static int m25pxx_init
(struct cyg_flash_dev *dev)
{
m25pxx_params* dev_params = (m25pxx_params*) m25pxx_supported_devices;
cyg_uint32 device_id;
int retval = FLASH_ERR_INVALID;
// Find the device in the supported devices list.
device_id = m25pxx_spi_rdid (dev);
while ((dev_params->jedec_id != 0) && (dev_params->jedec_id != device_id)) {
dev_params ++;
}
// Found supported device - update device parameters. M25PXX devices have a
// uniform sector distribution, so only 1 block info record is required.
if (dev_params->jedec_id != 0) {
ASSERT_M25P (dev->num_block_infos == 1, "Only 1 block info record required.");
ASSERT_M25P (dev->block_info != NULL, "Null pointer to block info record.");
if ((dev->num_block_infos == 1) && (dev->block_info != NULL)) {
TRACE_M25P ("Init device with JEDEC ID 0x%06X.\n", device_id);
dev->end = dev->start + (M25PXX_PAGE_SIZE * (cyg_flashaddr_t) dev_params->sector_size *
(cyg_flashaddr_t) dev_params->sector_count) - 1;
// Strictly speaking the block info fields are 'read only'. However, we
// have a legitimate reason for updating the contents here and can cast
// away the const.
((cyg_flash_block_info_t*) dev->block_info)->block_size =
M25PXX_PAGE_SIZE * (size_t) dev_params->sector_size;
((cyg_flash_block_info_t*) dev->block_info)->blocks =
(cyg_uint32) dev_params->sector_count;
retval = FLASH_ERR_OK;
}
}
return retval;
}
//-----------------------------------------------------------------------------
// Erase a single sector of the flash.
static int m25pxx_erase_block
(struct cyg_flash_dev *dev, cyg_flashaddr_t block_base)
{
cyg_flashaddr_t local_base = block_base;
int retval = FLASH_ERR_INVALID;
cyg_uint8 dev_status;
// Fix up the block address and send the sector erase command.
if (m25pxx_to_local_addr (dev, &local_base)) {
m25pxx_spi_wren (dev);
m25pxx_spi_se (dev, local_base);
// Spin waiting for the erase to complete. This can take between 1 and 3
// seconds, so we use a polling interval of 1/2 sec.
do {
M25PXX_DELAY_MS (500);
dev_status = m25pxx_spi_rdsr (dev);
} while (dev_status & M25PXX_STATUS_WIP);
retval = FLASH_ERR_OK;
}
return retval;
}
//-----------------------------------------------------------------------------
// Program an arbitrary number of pages into flash and verify written data.
static int m25pxx_program
(struct cyg_flash_dev *dev, cyg_flashaddr_t base, const void* data, size_t len)
{
cyg_flashaddr_t local_base = base;
int retval = FLASH_ERR_OK;
cyg_uint8* tx_ptr = (cyg_uint8*) data;
cyg_uint32 tx_bytes_left = (cyg_uint32) len;
cyg_uint32 tx_bytes;
cyg_uint8 dev_status;
// Fix up the block address.
if (!m25pxx_to_local_addr (dev, &local_base)) {
retval = FLASH_ERR_INVALID;
goto out;
}
// The start of the transaction may not be page aligned, so we need to work
// out how many bytes to transmit before we hit the first page boundary.
tx_bytes = M25PXX_PAGE_SIZE - (((cyg_uint32) local_base) & (M25PXX_PAGE_SIZE - 1));
if (tx_bytes > tx_bytes_left) tx_bytes = tx_bytes_left;
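// Worked example (hypothetical values): with local_base = 0x1F0 and
// tx_bytes_left = 600, the first chunk is 256 - (0x1F0 & 0xFF) = 16 bytes,
// which brings the write pointer to the 0x200 page boundary; the remaining
// 584 bytes then go out as two full 256-byte pages plus a 72-byte residue.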
// Perform page program operations.
while (tx_bytes_left) {
m25pxx_spi_wren (dev);
m25pxx_spi_pp (dev, local_base, tx_ptr, tx_bytes);
// Spin waiting for write to complete. This can take up to 5ms, so
// we use a polling interval of 1ms - which may get rounded up to the
// RTC tick granularity.
do {
M25PXX_DELAY_MS (1);
dev_status = m25pxx_spi_rdsr (dev);
} while (dev_status & M25PXX_STATUS_WIP);
// Update counters and data pointers for the next page.
tx_bytes_left -= tx_bytes;
tx_ptr += tx_bytes;
local_base += tx_bytes;
tx_bytes = (tx_bytes_left > M25PXX_PAGE_SIZE) ? M25PXX_PAGE_SIZE : tx_bytes_left;
}
out:
return retval;
}
//-----------------------------------------------------------------------------
// Read back an arbitrary amount of data from flash.
static int m25pxx_read
(struct cyg_flash_dev *dev, const cyg_flashaddr_t base, void* data, size_t len)
{
cyg_flashaddr_t local_base = base;
int retval = FLASH_ERR_INVALID;
cyg_uint8* rx_ptr = (cyg_uint8*) data;
cyg_uint32 rx_bytes_left = (cyg_uint32) len;
cyg_uint32 rx_bytes;
// Determine the maximum transfer size to use.
cyg_uint32 rx_block_size = (CYGNUM_DEVS_FLASH_SPI_M25PXX_READ_BLOCK_SIZE == 0) ?
0xFFFFFFFF : CYGNUM_DEVS_FLASH_SPI_M25PXX_READ_BLOCK_SIZE;
// Fix up the block address and fill the read buffer.
if (m25pxx_to_local_addr (dev, &local_base)) {
while (rx_bytes_left) {
rx_bytes = (rx_bytes_left < rx_block_size) ? rx_bytes_left : rx_block_size;
m25pxx_spi_fread (dev, local_base, rx_ptr, rx_bytes);
// Update counters and data pointers for next read block.
rx_bytes_left -= rx_bytes;
rx_ptr += rx_bytes;
local_base += rx_bytes;
}
retval = FLASH_ERR_OK;
}
return retval;
}
//=============================================================================
// Fill in the driver data structures.
//=============================================================================
CYG_FLASH_FUNS (
cyg_devs_flash_spi_m25pxx_funs, // Exported name of function pointers.
m25pxx_init, // Flash initialisation.
cyg_flash_devfn_query_nop, // Query operations not supported.
m25pxx_erase_block, // Sector erase.
m25pxx_program, // Program multiple pages.
m25pxx_read, // Read arbitrary amount of data.
cyg_flash_devfn_lock_nop, // Locking not supported (no per-sector locks).
cyg_flash_devfn_unlock_nop
);
//=============================================================================
| reille/proj_ecos | src/ecos/packages/devs/flash/spi/m25pxx/current/src/m25pxx.c | C | gpl-2.0 | 17,781 |
/*
* drivers/power/process.c - Functions for starting/stopping processes on
* suspend transitions.
*
* Originally from swsusp.
*/
#undef DEBUG
#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <linux/wakelock.h>
#include "power.h"
/*
* Timeout for stopping processes
*/
#define TIMEOUT (20 * HZ)
static int try_to_freeze_tasks(bool user_only)
{
struct task_struct *g, *p;
unsigned long end_time;
unsigned int todo;
bool wq_busy = false;
struct timeval start, end;
u64 elapsed_csecs64;
unsigned int elapsed_csecs;
bool wakeup = false;
do_gettimeofday(&start);
end_time = jiffies + TIMEOUT;
if (!user_only)
freeze_workqueues_begin();
while (true) {
todo = 0;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
if (p == current || !freeze_task(p))
continue;
/*
* Now that we've done set_freeze_flag, don't
* perturb a task in TASK_STOPPED or TASK_TRACED.
* It is "frozen enough". If the task does wake
* up, it will immediately call try_to_freeze.
*
* Because freeze_task() goes through p's scheduler lock, it's
* guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
* transition can't race with task state testing here.
*/
if (!task_is_stopped_or_traced(p) &&
!freezer_should_skip(p))
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
if (!user_only) {
wq_busy = freeze_workqueues_busy();
todo += wq_busy;
}
if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
wakeup = 1;
break;
}
if (!todo || time_after(jiffies, end_time))
break;
if (pm_wakeup_pending()) {
wakeup = true;
break;
}
/*
* We need to retry, but first give the freezing tasks some
* time to enter the refrigerator.
*/
msleep(10);
}
do_gettimeofday(&end);
elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
elapsed_csecs = elapsed_csecs64;
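/*
 * elapsed_csecs is therefore in centiseconds: the nanosecond delta is
 * divided by NSEC_PER_SEC / 100 = 10,000,000. For example (hypothetical
 * numbers), a 1,234,000,000 ns freeze attempt yields 123, printed below
 * as "1.23 seconds" via the /100 and %100 split.
 */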
if (todo) {
/* This does not unfreeze processes that are already frozen
* (we have slightly ugly calling convention in that respect,
* and caller must call thaw_processes() if something fails),
* but it cleans up leftover PF_FREEZE requests.
*/
if(wakeup) {
printk("\n");
printk(KERN_ERR "Freezing of %s aborted\n",
user_only ? "user space " : "tasks ");
}
else {
printk("\n");
printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
"(%d tasks refusing to freeze, wq_busy=%d):\n",
wakeup ? "aborted" : "failed",
elapsed_csecs / 100, elapsed_csecs % 100,
todo - wq_busy, wq_busy);
}
if (!wakeup) {
read_lock(&tasklist_lock);
do_each_thread(g, p) {
if (p != current && !freezer_should_skip(p)
&& freezing(p) && !frozen(p) &&
elapsed_csecs > 100)
sched_show_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
} else {
printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
elapsed_csecs % 100);
}
return todo ? -EBUSY : 0;
}
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
*
* On success, returns 0. On failure, -errno and system is fully thawed.
*/
int freeze_processes(void)
{
int error;
error = suspend_sys_sync_wait();
if (error)
return error;
error = __usermodehelper_disable(UMH_FREEZING);
if (error)
return error;
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
printk("Freezing user space processes ... ");
pm_freezing = true;
error = try_to_freeze_tasks(true);
if (!error) {
printk("done.");
__usermodehelper_set_disable_depth(UMH_DISABLED);
oom_killer_disable();
}
printk("\n");
BUG_ON(in_atomic());
if (error)
thaw_processes();
return error;
}
/**
* freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
*
* On success, returns 0. On failure, -errno and only the kernel threads are
* thawed, so as to give a chance to the caller to do additional cleanups
* (if any) before thawing the userspace tasks. So, it is the responsibility
* of the caller to thaw the userspace tasks, when the time is right.
*/
int freeze_kernel_threads(void)
{
int error;
// error = suspend_sys_sync_wait();
// if (error)
// return error;
printk("Freezing remaining freezable tasks ... ");
pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
if (!error)
printk("done.");
printk("\n");
BUG_ON(in_atomic());
if (error)
thaw_kernel_threads();
return error;
}
void thaw_processes(void)
{
struct task_struct *g, *p;
if (pm_freezing)
atomic_dec(&system_freezing_cnt);
pm_freezing = false;
pm_nosig_freezing = false;
oom_killer_enable();
printk("Restarting tasks ... ");
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
__thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
usermodehelper_enable();
schedule();
printk("done.\n");
}
void thaw_kernel_threads(void)
{
struct task_struct *g, *p;
pm_nosig_freezing = false;
printk("Restarting kernel threads ... ");
thaw_workqueues();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
__thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
schedule();
printk("done.\n");
}
| forfivo/v500_kernel_aosp | kernel/power/process.c | C | gpl-2.0 | 5,537 |
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/msm_tsens.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/msm_thermal.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/thermal.h>
#include <linux/regulator/rpm-smd-regulator.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/msm_thermal_ioctl.h>
#include <soc/qcom/rpm-smd.h>
#include <soc/qcom/scm.h>
#include <linux/sched/rt.h>
#define CREATE_TRACE_POINTS
#define TRACE_MSM_THERMAL
#include <trace/trace_thermal.h>
#define MAX_CURRENT_UA 100000
#define MAX_RAILS 5
#define MAX_THRESHOLD 2
#define MONITOR_ALL_TSENS -1
#define TSENS_NAME_MAX 20
#define TSENS_NAME_FORMAT "tsens_tz_sensor%d"
#define THERM_SECURE_BITE_CMD 8
#define SENSOR_SCALING_FACTOR 1
#define CPU_DEVICE "cpu%d"
#define POLLING_DELAY 100
unsigned int temp_threshold = 60;
module_param(temp_threshold, int, 0755);
static struct msm_thermal_data msm_thermal_info;
static struct delayed_work check_temp_work;
static bool core_control_enabled;
static uint32_t cpus_offlined;
static DEFINE_MUTEX(core_control_mutex);
static struct kobject *cc_kobj;
static struct kobject *mx_kobj;
static struct task_struct *hotplug_task;
static struct task_struct *freq_mitigation_task;
static struct task_struct *thermal_monitor_task;
static struct completion hotplug_notify_complete;
static struct completion freq_mitigation_complete;
static struct completion thermal_monitor_complete;
static int enabled;
static int polling_enabled;
static int rails_cnt;
static int sensor_cnt;
static int psm_rails_cnt;
static int ocr_rail_cnt;
static int limit_idx;
static int limit_idx_low;
static int limit_idx_high;
static int max_tsens_num;
static struct cpufreq_frequency_table *table;
static uint32_t usefreq;
static int freq_table_get;
static bool vdd_rstr_enabled;
static bool vdd_rstr_nodes_called;
static bool vdd_rstr_probed;
static bool sensor_info_nodes_called;
static bool sensor_info_probed;
static bool psm_enabled;
static bool psm_nodes_called;
static bool psm_probed;
static bool freq_mitigation_enabled;
static bool ocr_enabled;
static bool ocr_nodes_called;
static bool ocr_probed;
static bool ocr_reg_init_defer;
static bool hotplug_enabled;
static bool interrupt_mode_enable;
static bool msm_thermal_probed;
static bool gfx_crit_phase_ctrl_enabled;
static bool gfx_warm_phase_ctrl_enabled;
static bool cx_phase_ctrl_enabled;
static bool vdd_mx_enabled;
static bool therm_reset_enabled;
static bool online_core;
static bool cluster_info_probed;
static bool cluster_info_nodes_called;
static int *tsens_id_map;
static DEFINE_MUTEX(vdd_rstr_mutex);
static DEFINE_MUTEX(psm_mutex);
static DEFINE_MUTEX(cx_mutex);
static DEFINE_MUTEX(gfx_mutex);
static DEFINE_MUTEX(ocr_mutex);
static DEFINE_MUTEX(vdd_mx_mutex);
static uint32_t min_freq_limit;
static uint32_t curr_gfx_band;
static uint32_t curr_cx_band;
static struct kobj_attribute cx_mode_attr;
static struct kobj_attribute gfx_mode_attr;
static struct kobj_attribute mx_enabled_attr;
static struct attribute_group cx_attr_gp;
static struct attribute_group gfx_attr_gp;
static struct attribute_group mx_attr_group;
static struct regulator *vdd_mx;
static struct cpufreq_frequency_table *pending_freq_table_ptr;
static int pending_cpu_freq = -1;
static long *tsens_temp_at_panic;
static LIST_HEAD(devices_list);
enum thermal_threshold {
HOTPLUG_THRESHOLD_HIGH,
HOTPLUG_THRESHOLD_LOW,
FREQ_THRESHOLD_HIGH,
FREQ_THRESHOLD_LOW,
THRESHOLD_MAX_NR,
};
enum sensor_id_type {
THERM_ZONE_ID,
THERM_TSENS_ID,
THERM_ID_MAX_NR,
};
struct cluster_info {
int cluster_id;
uint32_t entity_count;
struct cluster_info *child_entity_ptr;
struct cluster_info *parent_ptr;
struct cpufreq_frequency_table *freq_table;
int freq_idx;
int freq_idx_low;
int freq_idx_high;
cpumask_t cluster_cores;
bool sync_cluster;
uint32_t limited_max_freq;
uint32_t limited_min_freq;
};
struct cpu_info {
uint32_t cpu;
const char *sensor_type;
enum sensor_id_type id_type;
uint32_t sensor_id;
bool offline;
bool user_offline;
bool hotplug_thresh_clear;
struct sensor_threshold threshold[THRESHOLD_MAX_NR];
bool max_freq;
uint32_t user_max_freq;
uint32_t user_min_freq;
uint32_t limited_max_freq;
uint32_t limited_min_freq;
bool freq_thresh_clear;
struct cluster_info *parent_ptr;
};
struct threshold_info;
struct therm_threshold {
int32_t sensor_id;
enum sensor_id_type id_type;
struct sensor_threshold threshold[MAX_THRESHOLD];
int32_t trip_triggered;
void (*notify)(struct therm_threshold *);
struct threshold_info *parent;
};
struct threshold_info {
uint32_t thresh_ct;
bool thresh_triggered;
struct therm_threshold *thresh_list;
};
struct rail {
const char *name;
uint32_t freq_req;
uint32_t min_level;
uint32_t num_levels;
int32_t curr_level;
uint32_t levels[3];
struct kobj_attribute value_attr;
struct kobj_attribute level_attr;
struct regulator *reg;
struct attribute_group attr_gp;
};
struct msm_sensor_info {
const char *name;
const char *alias;
const char *type;
uint32_t scaling_factor;
};
struct psm_rail {
const char *name;
uint8_t init;
uint8_t mode;
struct kobj_attribute mode_attr;
struct rpm_regulator *reg;
struct regulator *phase_reg;
struct attribute_group attr_gp;
};
struct devmgr_devices {
struct device_manager_data *hotplug_dev;
struct device_manager_data *cpufreq_dev[NR_CPUS];
};
enum msm_thresh_list {
MSM_THERM_RESET,
MSM_VDD_RESTRICTION,
MSM_CX_PHASE_CTRL_HOT,
MSM_GFX_PHASE_CTRL_WARM,
MSM_GFX_PHASE_CTRL_HOT,
MSM_OCR,
MSM_VDD_MX_RESTRICTION,
MSM_LIST_MAX_NR,
};
enum msm_thermal_phase_ctrl {
MSM_CX_PHASE_CTRL,
MSM_GFX_PHASE_CTRL,
MSM_PHASE_CTRL_NR,
};
enum msm_temp_band {
MSM_COLD_CRITICAL = 1,
MSM_COLD,
MSM_COOL,
MSM_NORMAL,
MSM_WARM,
MSM_HOT,
MSM_HOT_CRITICAL,
MSM_TEMP_MAX_NR,
};
static struct psm_rail *psm_rails;
static struct psm_rail *ocr_rails;
static struct rail *rails;
static struct msm_sensor_info *sensors;
static struct cpu_info cpus[NR_CPUS];
static struct threshold_info *thresh;
static bool mx_restr_applied;
static struct cluster_info *core_ptr;
static struct devmgr_devices *devices;
struct vdd_rstr_enable {
struct kobj_attribute ko_attr;
uint32_t enabled;
};
/* For SMPS only*/
enum PMIC_SW_MODE {
PMIC_AUTO_MODE = RPM_REGULATOR_MODE_AUTO,
PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK,
PMIC_PWM_MODE = RPM_REGULATOR_MODE_HPM,
};
enum ocr_request {
OPTIMUM_CURRENT_MIN,
OPTIMUM_CURRENT_MAX,
OPTIMUM_CURRENT_NR,
};
#define SYNC_CORE(_cpu) \
(core_ptr && cpus[_cpu].parent_ptr->sync_cluster)
#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
ko_attr.attr.name = __stringify(_name); \
ko_attr.attr.mode = 0444; \
ko_attr.show = vdd_rstr_reg_##_name##_show; \
ko_attr.store = NULL; \
sysfs_attr_init(&ko_attr.attr); \
_rail.attr_gp.attrs[j] = &ko_attr.attr;
#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
ko_attr.attr.name = __stringify(_name); \
ko_attr.attr.mode = 0644; \
ko_attr.show = vdd_rstr_reg_##_name##_show; \
ko_attr.store = vdd_rstr_reg_##_name##_store; \
sysfs_attr_init(&ko_attr.attr); \
_rail.attr_gp.attrs[j] = &ko_attr.attr;
#define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \
(container_of(attr, struct vdd_rstr_enable, ko_attr));
#define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \
(container_of(attr, struct rail, value_attr));
#define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \
(container_of(attr, struct rail, level_attr));
#define OCR_RW_ATTRIB(_rail, ko_attr, j, _name) \
ko_attr.attr.name = __stringify(_name); \
ko_attr.attr.mode = 0644; \
ko_attr.show = ocr_reg_##_name##_show; \
ko_attr.store = ocr_reg_##_name##_store; \
sysfs_attr_init(&ko_attr.attr); \
_rail.attr_gp.attrs[j] = &ko_attr.attr;
#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
ko_attr.attr.name = __stringify(_name); \
ko_attr.attr.mode = 0644; \
ko_attr.show = psm_reg_##_name##_show; \
ko_attr.store = psm_reg_##_name##_store; \
sysfs_attr_init(&ko_attr.attr); \
_rail.attr_gp.attrs[j] = &ko_attr.attr;
#define PSM_REG_MODE_FROM_ATTRIBS(attr) \
(container_of(attr, struct psm_rail, mode_attr));
#define PHASE_RW_ATTR(_phase, _name, _attr, j, _attr_gr) \
_attr.attr.name = __stringify(_name); \
_attr.attr.mode = 0644; \
_attr.show = _phase##_phase_show; \
_attr.store = _phase##_phase_store; \
sysfs_attr_init(&_attr.attr); \
_attr_gr.attrs[j] = &_attr.attr;
#define MX_RW_ATTR(ko_attr, _name, _attr_gp) \
ko_attr.attr.name = __stringify(_name); \
ko_attr.attr.mode = 0644; \
ko_attr.show = show_mx_##_name; \
ko_attr.store = store_mx_##_name; \
sysfs_attr_init(&ko_attr.attr); \
_attr_gp.attrs[0] = &ko_attr.attr;
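/*
 * Example of how these attribute macros are meant to be invoked (a sketch,
 * not an exhaustive list of call sites): for a rail "r" the call
 *
 *	VDD_RES_RW_ATTRIB(r, r.level_attr, 1, level);
 *
 * names the sysfs file "level", gives it 0644 permissions, wires it to
 * vdd_rstr_reg_level_show()/vdd_rstr_reg_level_store() and stores the
 * attribute pointer in slot 1 of r.attr_gp.attrs[].
 */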
static struct device_manager_data *find_device_by_name(const char *device_name)
{
struct device_manager_data *dev_mgr = NULL;
list_for_each_entry(dev_mgr, &devices_list, dev_ptr) {
if (strcmp(dev_mgr->device_name, device_name) == 0)
return dev_mgr;
}
return NULL;
}
static int validate_client(struct device_clnt_data *clnt)
{
int ret = 0;
struct device_manager_data *dev_mgr = NULL;
struct device_clnt_data *client_ptr = NULL;
if (!clnt || !clnt->dev_mgr) {
pr_err("Invalid client\n");
ret = -EINVAL;
goto validate_exit;
}
list_for_each_entry(dev_mgr, &devices_list, dev_ptr) {
if (dev_mgr == clnt->dev_mgr)
break;
}
if (dev_mgr != clnt->dev_mgr) {
pr_err("Invalid device manager\n");
ret = -EINVAL;
goto validate_exit;
}
mutex_lock(&dev_mgr->clnt_lock);
list_for_each_entry(client_ptr, &dev_mgr->client_list, clnt_ptr) {
if (clnt == client_ptr)
break;
}
if (clnt != client_ptr) {
pr_err("Invalid client\n");
ret = -EINVAL;
goto validate_unlock;
}
validate_unlock:
mutex_unlock(&dev_mgr->clnt_lock);
validate_exit:
return ret;
}
static int devmgr_client_cpufreq_update(struct device_manager_data *dev_mgr)
{
int ret = 0;
struct device_clnt_data *clnt = NULL;
uint32_t max_freq = UINT_MAX;
uint32_t min_freq = 0;
mutex_lock(&dev_mgr->clnt_lock);
list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
if (!clnt->req_active)
continue;
max_freq = min(max_freq, clnt->request.freq.max_freq);
min_freq = max(min_freq, clnt->request.freq.min_freq);
}
if (dev_mgr->active_req.freq.max_freq == max_freq &&
dev_mgr->active_req.freq.min_freq == min_freq) {
goto update_exit;
}
dev_mgr->active_req.freq.max_freq = max_freq;
dev_mgr->active_req.freq.min_freq = min_freq;
if (freq_mitigation_task) {
complete(&freq_mitigation_complete);
} else {
pr_err("Frequency mitigation task is not initialized\n");
ret = -ESRCH;
}
update_exit:
mutex_unlock(&dev_mgr->clnt_lock);
return ret;
}
static int devmgr_client_hotplug_update(struct device_manager_data *dev_mgr)
{
int ret = 0;
struct device_clnt_data *clnt = NULL;
cpumask_t offline_mask = CPU_MASK_NONE;
mutex_lock(&dev_mgr->clnt_lock);
list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
if (!clnt->req_active)
continue;
cpumask_or(&offline_mask, &offline_mask,
&clnt->request.offline_mask);
}
if (cpumask_equal(&dev_mgr->active_req.offline_mask, &offline_mask))
goto update_exit;
cpumask_copy(&dev_mgr->active_req.offline_mask, &offline_mask);
if (hotplug_task) {
complete(&hotplug_notify_complete);
} else {
pr_err("Hotplug task is not initialized\n");
ret = -ESRCH;
}
update_exit:
mutex_unlock(&dev_mgr->clnt_lock);
return ret;
}
static int devmgr_hotplug_client_request_validate_and_update(
struct device_clnt_data *clnt,
union device_request *req,
enum device_req_type type)
{
if (type != HOTPLUG_MITIGATION_REQ)
return -EINVAL;
cpumask_copy(&clnt->request.offline_mask, &req->offline_mask);
if (!cpumask_empty(&req->offline_mask))
clnt->req_active = true;
else
clnt->req_active = false;
return 0;
}
static int devmgr_cpufreq_client_request_validate_and_update(
struct device_clnt_data *clnt,
union device_request *req,
enum device_req_type type)
{
if (type != CPUFREQ_MITIGATION_REQ)
return -EINVAL;
if (req->freq.max_freq < req->freq.min_freq) {
pr_err("Invalid Max and Min freq req. max:%u min:%u\n",
req->freq.max_freq, req->freq.min_freq);
return -EINVAL;
}
clnt->request.freq.max_freq = req->freq.max_freq;
clnt->request.freq.min_freq = req->freq.min_freq;
if ((req->freq.max_freq == CPUFREQ_MAX_NO_MITIGATION) &&
(req->freq.min_freq == CPUFREQ_MIN_NO_MITIGATION))
clnt->req_active = false;
else
clnt->req_active = true;
return 0;
}
int devmgr_client_request_mitigation(struct device_clnt_data *clnt,
enum device_req_type type,
union device_request *req)
{
int ret = 0;
struct device_manager_data *dev_mgr = NULL;
if (!clnt || !req) {
pr_err("Invalid inputs for mitigation.\n");
ret = -EINVAL;
goto req_exit;
}
ret = validate_client(clnt);
if (ret) {
pr_err("Invalid mitigation client. ret:%d\n", ret);
goto req_exit;
}
if (!clnt->dev_mgr->request_validate) {
pr_err("Invalid dev mgr request update\n");
ret = -EINVAL;
goto req_exit;
}
dev_mgr = clnt->dev_mgr;
mutex_lock(&dev_mgr->clnt_lock);
ret = dev_mgr->request_validate(clnt, req, type);
if (ret) {
pr_err("Invalid client request\n");
goto req_unlock;
}
req_unlock:
mutex_unlock(&dev_mgr->clnt_lock);
if (!ret && dev_mgr->update)
dev_mgr->update(dev_mgr);
req_exit:
return ret;
}
struct device_clnt_data *devmgr_register_mitigation_client(struct device *dev,
const char *device_name,
void (*callback)(struct device_clnt_data *,
union device_request *, void *))
{
struct device_clnt_data *client = NULL;
struct device_manager_data *dev_mgr = NULL;
if (!dev || !device_name) {
pr_err("Invalid input\n");
return ERR_PTR(-EINVAL);
}
dev_mgr = find_device_by_name(device_name);
if (!dev_mgr) {
pr_err("Invalid device %s\n", device_name);
return ERR_PTR(-EINVAL);
}
client = devm_kzalloc(dev,
sizeof(struct device_clnt_data), GFP_KERNEL);
if (!client) {
pr_err("Memory alloc failed\n");
return ERR_PTR(-ENOMEM);
}
mutex_lock(&dev_mgr->clnt_lock);
client->dev_mgr = dev_mgr;
client->callback = callback;
list_add_tail(&client->clnt_ptr, &dev_mgr->client_list);
mutex_unlock(&dev_mgr->clnt_lock);
return client;
}
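/*
 * Usage sketch (hypothetical caller; the device name and frequency value
 * are only examples): a driver that wants to cap cpu0's frequency could do
 *
 *	struct device_clnt_data *clnt;
 *	union device_request req;
 *
 *	clnt = devmgr_register_mitigation_client(&pdev->dev, "cpu0", NULL);
 *	if (!IS_ERR_OR_NULL(clnt)) {
 *		req.freq.max_freq = 1190400;
 *		req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
 *		devmgr_client_request_mitigation(clnt,
 *				CPUFREQ_MITIGATION_REQ, &req);
 *	}
 */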
void devmgr_unregister_mitigation_client(struct device *dev,
struct device_clnt_data *clnt)
{
int ret = 0;
struct device_manager_data *dev_mgr = NULL;
if (!clnt) {
pr_err("Invalid input\n");
return;
}
ret = validate_client(clnt);
if (ret)
return;
dev_mgr = clnt->dev_mgr;
mutex_lock(&dev_mgr->clnt_lock);
list_del(&clnt->clnt_ptr);
mutex_unlock(&dev_mgr->clnt_lock);
devm_kfree(dev, clnt);
if (dev_mgr->update)
dev_mgr->update(dev_mgr);
}
static int msm_thermal_cpufreq_callback(struct notifier_block *nfb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
uint32_t max_freq_req, min_freq_req;
switch (event) {
case CPUFREQ_INCOMPATIBLE:
if (SYNC_CORE(policy->cpu)) {
max_freq_req =
cpus[policy->cpu].parent_ptr->limited_max_freq;
min_freq_req =
cpus[policy->cpu].parent_ptr->limited_min_freq;
} else {
max_freq_req = cpus[policy->cpu].limited_max_freq;
min_freq_req = cpus[policy->cpu].limited_min_freq;
}
pr_debug("mitigating CPU%d to freq max: %u min: %u\n",
policy->cpu, max_freq_req, min_freq_req);
cpufreq_verify_within_limits(policy, min_freq_req,
max_freq_req);
if (max_freq_req < min_freq_req)
pr_err("Invalid frequency request Max:%u Min:%u\n",
max_freq_req, min_freq_req);
break;
case CPUFREQ_CREATE_POLICY:
if (pending_cpu_freq != -1 &&
(cpumask_first(policy->related_cpus) ==
pending_cpu_freq)) {
pr_debug("Updating freq plan for cpu: %d\n",
policy->cpu);
pending_freq_table_ptr = cpufreq_frequency_get_table(
policy->cpu);
pending_cpu_freq = -1;
}
break;
}
return NOTIFY_OK;
}
static struct notifier_block msm_thermal_cpufreq_notifier = {
.notifier_call = msm_thermal_cpufreq_callback,
};
static void update_cpu_freq(int cpu)
{
int ret = 0;
if (cpu_online(cpu)) {
trace_thermal_pre_frequency_mit(cpu,
cpus[cpu].limited_max_freq,
cpus[cpu].limited_min_freq);
ret = cpufreq_update_policy(cpu);
trace_thermal_post_frequency_mit(cpu,
cpufreq_quick_get_max(cpu),
cpus[cpu].limited_min_freq);
if (ret)
pr_err("Unable to update policy for cpu:%d. err:%d\n",
cpu, ret);
}
}
static int * __init get_sync_cluster(struct device *dev, int *cnt)
{
int *sync_cluster = NULL, cluster_cnt = 0, ret = 0;
char *key = "qcom,synchronous-cluster-id";
if (!of_get_property(dev->of_node, key, &cluster_cnt)
|| cluster_cnt <= 0 || !core_ptr)
return NULL;
cluster_cnt /= sizeof(__be32);
if (cluster_cnt > core_ptr->entity_count) {
pr_err("Invalid cluster count:%d\n", cluster_cnt);
return NULL;
}
sync_cluster = devm_kzalloc(dev, sizeof(int) * cluster_cnt, GFP_KERNEL);
if (!sync_cluster) {
pr_err("Memory alloc failed\n");
return NULL;
}
ret = of_property_read_u32_array(dev->of_node, key, sync_cluster,
cluster_cnt);
if (ret) {
pr_err("Error in reading property:%s. err:%d\n", key, ret);
devm_kfree(dev, sync_cluster);
return NULL;
}
*cnt = cluster_cnt;
return sync_cluster;
}
static void update_cpu_datastructure(struct cluster_info *cluster_ptr,
int *sync_cluster, int sync_cluster_cnt)
{
int i = 0;
bool is_sync_cluster = false;
for (i = 0; (sync_cluster) && (i < sync_cluster_cnt); i++) {
if (cluster_ptr->cluster_id != sync_cluster[i])
continue;
is_sync_cluster = true;
break;
}
cluster_ptr->sync_cluster = is_sync_cluster;
pr_debug("Cluster ID:%d Sync cluster:%s Sibling mask:%lu\n",
cluster_ptr->cluster_id, is_sync_cluster ? "Yes" : "No",
*cluster_ptr->cluster_cores.bits);
for_each_cpu_mask(i, cluster_ptr->cluster_cores) {
cpus[i].parent_ptr = cluster_ptr;
}
}
static ssize_t cluster_info_show(
struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
uint32_t i = 0;
ssize_t tot_size = 0, size = 0;
for (; i < core_ptr->entity_count; i++) {
struct cluster_info *cluster_ptr =
&core_ptr->child_entity_ptr[i];
size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size,
"%d:%lu:%d ", cluster_ptr->cluster_id,
*cluster_ptr->cluster_cores.bits,
cluster_ptr->sync_cluster);
if ((tot_size + size) >= PAGE_SIZE) {
pr_err("Not enough buffer size");
break;
}
tot_size += size;
}
return tot_size;
}
static struct kobj_attribute cluster_info_attr = __ATTR_RO(cluster_info);
static int create_cpu_topology_sysfs(void)
{
int ret = 0;
struct kobject *module_kobj = NULL;
if (!cluster_info_probed) {
cluster_info_nodes_called = true;
return ret;
}
if (!core_ptr)
return ret;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("cannot find kobject\n");
return -ENODEV;
}
sysfs_attr_init(&cluster_info_attr.attr);
ret = sysfs_create_file(module_kobj, &cluster_info_attr.attr);
if (ret) {
pr_err("cannot create cluster info attr group. err:%d\n", ret);
return ret;
}
return ret;
}
static int get_device_tree_cluster_info(struct device *dev, int *cluster_id,
cpumask_t *cluster_cpus)
{
int i, cluster_cnt = 0, ret = 0;
uint32_t val = 0;
char *key = "qcom,synchronous-cluster-map";
if (!of_get_property(dev->of_node, key, &cluster_cnt)
|| cluster_cnt <= 0) {
pr_debug("Property %s not defined.\n", key);
return -ENODEV;
}
if (cluster_cnt % (sizeof(__be32) * 2)) {
pr_err("Invalid number(%d) of entry for %s\n",
cluster_cnt, key);
return -EINVAL;
}
cluster_cnt /= (sizeof(__be32) * 2);
for (i = 0; i < cluster_cnt; i++) {
ret = of_property_read_u32_index(dev->of_node, key,
i * 2, &val);
if (ret) {
pr_err("Error reading index%d\n", i * 2);
return -EINVAL;
}
cluster_id[i] = val;
of_property_read_u32_index(dev->of_node, key, i * 2 + 1, &val);
if (ret) {
pr_err("Error reading index%d\n", i * 2 + 1);
return -EINVAL;
}
*cluster_cpus[i].bits = val;
}
return cluster_cnt;
}
static int get_kernel_cluster_info(int *cluster_id, cpumask_t *cluster_cpus)
{
uint32_t _cpu, cluster_index, cluster_cnt;
for (_cpu = 0, cluster_cnt = 0; _cpu < num_possible_cpus(); _cpu++) {
if (topology_physical_package_id(_cpu) < 0) {
pr_err("CPU%d topology not initialized.\n", _cpu);
return -ENODEV;
}
/* Do not use the sibling cpumask from topology module.
** kernel topology module updates the sibling cpumask
** only when the cores are brought online for the first time.
** KTM figures out the sibling cpumask using the
** cluster and core ID mapping.
*/
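/* Worked example (hypothetical topology): with eight possible CPUs whose
** physical package IDs read back as 0,0,0,0,1,1,1,1 the loop below ends
** up with cluster_cnt = 2, cluster_id = {0, 1} and sibling masks 0x0F
** and 0xF0 respectively.
*/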
for (cluster_index = 0; cluster_index < num_possible_cpus();
cluster_index++) {
if (cluster_id[cluster_index] == -1) {
cluster_id[cluster_index] =
topology_physical_package_id(_cpu);
*cluster_cpus[cluster_index].bits = 0;
cpumask_set_cpu(_cpu,
&cluster_cpus[cluster_index]);
cluster_cnt++;
break;
}
if (cluster_id[cluster_index] ==
topology_physical_package_id(_cpu)) {
cpumask_set_cpu(_cpu,
&cluster_cpus[cluster_index]);
break;
}
}
}
return cluster_cnt;
}
static void update_cpu_topology(struct device *dev)
{
int cluster_id[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
cpumask_t cluster_cpus[NR_CPUS];
uint32_t i, j;
int cluster_cnt, cpu, sync_cluster_cnt = 0;
struct cluster_info *temp_ptr = NULL;
int *sync_cluster_id = NULL;
cluster_info_probed = true;
cluster_cnt = get_kernel_cluster_info(cluster_id, cluster_cpus);
if (cluster_cnt <= 0) {
cluster_cnt = get_device_tree_cluster_info(dev, cluster_id,
cluster_cpus);
if (cluster_cnt <= 0) {
core_ptr = NULL;
pr_debug("Cluster Info not defined. KTM continues.\n");
return;
}
}
core_ptr = devm_kzalloc(dev, sizeof(struct cluster_info), GFP_KERNEL);
if (!core_ptr) {
pr_err("Memory alloc failed\n");
return;
}
core_ptr->parent_ptr = NULL;
core_ptr->entity_count = cluster_cnt;
core_ptr->cluster_id = -1;
core_ptr->sync_cluster = false;
temp_ptr = devm_kzalloc(dev, sizeof(struct cluster_info) * cluster_cnt,
GFP_KERNEL);
if (!temp_ptr) {
pr_err("Memory alloc failed\n");
devm_kfree(dev, core_ptr);
core_ptr = NULL;
return;
}
sync_cluster_id = get_sync_cluster(dev, &sync_cluster_cnt);
for (i = 0; i < cluster_cnt; i++) {
pr_debug("Cluster_ID:%d CPU's:%lu\n", cluster_id[i],
*cluster_cpus[i].bits);
temp_ptr[i].cluster_id = cluster_id[i];
temp_ptr[i].parent_ptr = core_ptr;
temp_ptr[i].cluster_cores = cluster_cpus[i];
temp_ptr[i].limited_max_freq = UINT_MAX;
temp_ptr[i].limited_min_freq = 0;
temp_ptr[i].freq_idx = 0;
temp_ptr[i].freq_idx_low = 0;
temp_ptr[i].freq_idx_high = 0;
temp_ptr[i].freq_table = NULL;
j = 0;
for_each_cpu_mask(cpu, cluster_cpus[i])
j++;
temp_ptr[i].entity_count = j;
temp_ptr[i].child_entity_ptr = NULL;
update_cpu_datastructure(&temp_ptr[i], sync_cluster_id,
sync_cluster_cnt);
}
core_ptr->child_entity_ptr = temp_ptr;
}
static int __ref init_cluster_freq_table(void)
{
uint32_t _cluster = 0, _cpu = 0, table_len = 0, idx = 0;
int ret = 0;
struct cluster_info *cluster_ptr = NULL;
struct cpufreq_policy *policy = NULL;
struct cpufreq_frequency_table *freq_table_ptr = NULL;
for (; _cluster < core_ptr->entity_count; _cluster++, table_len = 0,
(policy && freq_table_ptr) ? cpufreq_cpu_put(policy) : 0,
policy = NULL, freq_table_ptr = NULL) {
cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
if (cluster_ptr->freq_table)
continue;
for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) {
policy = cpufreq_cpu_get(_cpu);
if (!policy)
continue;
freq_table_ptr = cpufreq_frequency_get_table(
policy->cpu);
if (!freq_table_ptr) {
cpufreq_cpu_put(policy);
continue;
} else {
break;
}
}
if (!freq_table_ptr) {
_cpu = first_cpu(cluster_ptr->cluster_cores);
pr_debug(
"Online cpu%d in cluster%d to read cpufreq table\n",
_cpu, cluster_ptr->cluster_id);
pending_cpu_freq = _cpu;
if (!cpu_online(_cpu)) {
#ifdef CONFIG_SMP
cpu_up(_cpu);
cpu_down(_cpu);
#endif
}
freq_table_ptr = pending_freq_table_ptr;
}
if (!freq_table_ptr) {
pr_debug("Error reading cluster%d cpufreq table\n",
cluster_ptr->cluster_id);
ret = -EAGAIN;
continue;
}
while (freq_table_ptr[table_len].frequency
!= CPUFREQ_TABLE_END)
table_len++;
cluster_ptr->freq_idx_low = 0;
cluster_ptr->freq_idx_high = cluster_ptr->freq_idx =
table_len - 1;
if (cluster_ptr->freq_idx_high < 0
|| (cluster_ptr->freq_idx_high
< cluster_ptr->freq_idx_low)) {
cluster_ptr->freq_idx = cluster_ptr->freq_idx_low =
cluster_ptr->freq_idx_high = 0;
WARN(1, "Cluster%d frequency table length:%d\n",
cluster_ptr->cluster_id, table_len);
ret = -EINVAL;
goto release_and_exit;
}
cluster_ptr->freq_table = devm_kzalloc(
&msm_thermal_info.pdev->dev,
sizeof(struct cpufreq_frequency_table) * table_len,
GFP_KERNEL);
if (!cluster_ptr->freq_table) {
pr_err("memory alloc failed\n");
cluster_ptr->freq_idx = cluster_ptr->freq_idx_low =
cluster_ptr->freq_idx_high = 0;
ret = -ENOMEM;
goto release_and_exit;
}
for (idx = 0; idx < table_len; idx++)
cluster_ptr->freq_table[idx].frequency =
freq_table_ptr[idx].frequency;
}
return ret;
release_and_exit:
cpufreq_cpu_put(policy);
return ret;
}
static void update_cluster_freq(void)
{
int online_cpu = -1;
struct cluster_info *cluster_ptr = NULL;
uint32_t _cluster = 0, _cpu = 0, max = UINT_MAX, min = 0;
if (!core_ptr)
return;
for (; _cluster < core_ptr->entity_count; _cluster++, _cpu = 0,
online_cpu = -1, max = UINT_MAX, min = 0) {
/*
** If a cluster is synchronous, go over the frequency limits
** of each core in that cluster and aggregate the minimum
** and maximum frequencies. After aggregating, request for
** frequency update on the first online core in that cluster.
** Cpufreq driver takes care of updating the frequency of
** other cores in a synchronous cluster.
*/
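/* Example (hypothetical limits): if CPU0 in the cluster is limited to
** [300000, 1401600] kHz and CPU1 to [800000, 1958400] kHz, the aggregate
** request below becomes max = 1401600 and min = 800000.
*/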
cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
if (!cluster_ptr->sync_cluster)
continue;
for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) {
if (online_cpu == -1 && cpu_online(_cpu))
online_cpu = _cpu;
max = min(max, cpus[_cpu].limited_max_freq);
min = max(min, cpus[_cpu].limited_min_freq);
}
if (cluster_ptr->limited_max_freq == max
&& cluster_ptr->limited_min_freq == min)
continue;
cluster_ptr->limited_max_freq = max;
cluster_ptr->limited_min_freq = min;
if (online_cpu != -1)
update_cpu_freq(online_cpu);
}
}
static void do_cluster_freq_ctrl(long temp)
{
uint32_t _cluster = 0;
int _cpu = -1, freq_idx = 0;
bool mitigate = false;
struct cluster_info *cluster_ptr = NULL;
if (temp >= msm_thermal_info.limit_temp_degC)
mitigate = true;
else if (temp < msm_thermal_info.limit_temp_degC -
msm_thermal_info.temp_hysteresis_degC)
mitigate = false;
else
return;
get_online_cpus();
for (; _cluster < core_ptr->entity_count; _cluster++) {
cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
if (!cluster_ptr->freq_table)
continue;
if (mitigate)
freq_idx = max_t(int, cluster_ptr->freq_idx_low,
(cluster_ptr->freq_idx
- msm_thermal_info.bootup_freq_step));
else
freq_idx = min_t(int, cluster_ptr->freq_idx_high,
(cluster_ptr->freq_idx
+ msm_thermal_info.bootup_freq_step));
if (freq_idx == cluster_ptr->freq_idx)
continue;
cluster_ptr->freq_idx = freq_idx;
for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) {
if (!(msm_thermal_info.bootup_freq_control_mask
& BIT(_cpu)))
continue;
pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n"
, _cpu
, cluster_ptr->freq_table[freq_idx].frequency
, temp);
cpus[_cpu].limited_max_freq =
cluster_ptr->freq_table[freq_idx].frequency;
}
}
if (_cpu != -1)
update_cluster_freq();
put_online_cpus();
}
/* If freq table exists, then we can send freq request */
static int check_freq_table(void)
{
int ret = 0;
uint32_t i = 0;
static bool invalid_table;
if (invalid_table)
return -EINVAL;
if (freq_table_get)
return 0;
if (core_ptr) {
ret = init_cluster_freq_table();
if (!ret)
freq_table_get = 1;
else if (ret == -EINVAL)
invalid_table = true;
return ret;
}
table = cpufreq_frequency_get_table(0);
if (!table) {
pr_debug("error reading cpufreq table\n");
return -EINVAL;
}
while (table[i].frequency != CPUFREQ_TABLE_END)
i++;
limit_idx_low = 0;
limit_idx_high = limit_idx = i - 1;
if (limit_idx_high < 0 || limit_idx_high < limit_idx_low) {
invalid_table = true;
table = NULL;
limit_idx_low = limit_idx_high = limit_idx = 0;
WARN(1, "CPU0 frequency table length:%d\n", i);
return -EINVAL;
}
freq_table_get = 1;
return 0;
}
static int update_cpu_min_freq_all(uint32_t min)
{
uint32_t cpu = 0, _cluster = 0;
int ret = 0;
struct cluster_info *cluster_ptr = NULL;
bool valid_table = false;
if (!freq_table_get) {
ret = check_freq_table();
if (ret && !core_ptr) {
pr_err("Fail to get freq table. err:%d\n", ret);
return ret;
}
}
/* Clamp the requested min to the highest allowed max frequency */
if (core_ptr) {
for (; _cluster < core_ptr->entity_count; _cluster++) {
cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
if (!cluster_ptr->freq_table)
continue;
valid_table = true;
min = min(min,
cluster_ptr->freq_table[
cluster_ptr->freq_idx_high].frequency);
}
if (!valid_table)
return ret;
} else {
min = min(min, table[limit_idx_high].frequency);
}
pr_debug("Requesting min freq:%u for all CPU's\n", min);
if (freq_mitigation_task) {
min_freq_limit = min;
complete(&freq_mitigation_complete);
} else {
get_online_cpus();
for_each_possible_cpu(cpu) {
cpus[cpu].limited_min_freq = min;
if (!SYNC_CORE(cpu))
update_cpu_freq(cpu);
}
update_cluster_freq();
put_online_cpus();
}
return ret;
}
static int vdd_restriction_apply_freq(struct rail *r, int level)
{
int ret = 0;
if (level == r->curr_level)
return ret;
/* level = -1: disable, level = 0,1,2..n: enable */
if (level == -1) {
ret = update_cpu_min_freq_all(r->min_level);
if (ret)
return ret;
else
r->curr_level = -1;
} else if (level >= 0 && level < (r->num_levels)) {
ret = update_cpu_min_freq_all(r->levels[level]);
if (ret)
return ret;
else
r->curr_level = level;
} else {
pr_err("level input:%d is not within range\n", level);
return -EINVAL;
}
return ret;
}
static int vdd_restriction_apply_voltage(struct rail *r, int level)
{
int ret = 0;
if (r->reg == NULL) {
pr_err("%s don't have regulator handle. can't apply vdd\n",
r->name);
return -EFAULT;
}
if (level == r->curr_level)
return ret;
/* level = -1: disable, level = 0,1,2..n: enable */
if (level == -1) {
ret = regulator_set_voltage(r->reg, r->min_level,
r->levels[r->num_levels - 1]);
if (!ret)
r->curr_level = -1;
pr_debug("Requested min level for %s. curr level: %d\n",
r->name, r->curr_level);
} else if (level >= 0 && level < (r->num_levels)) {
ret = regulator_set_voltage(r->reg, r->levels[level],
r->levels[r->num_levels - 1]);
if (!ret)
r->curr_level = level;
pr_debug("Requesting level %d for %s. curr level: %d\n",
r->levels[level], r->name, r->levels[r->curr_level]);
} else {
pr_err("level input:%d is not within range\n", level);
return -EINVAL;
}
return ret;
}
/* Set all rails to the same mode */
static int psm_set_mode_all(int mode)
{
int i = 0;
int fail_cnt = 0;
int ret = 0;
pr_debug("Requesting PMIC Mode: %d\n", mode);
for (i = 0; i < psm_rails_cnt; i++) {
if (psm_rails[i].mode != mode) {
ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
if (ret) {
pr_err("Cannot set mode:%d for %s. err:%d",
mode, psm_rails[i].name, ret);
fail_cnt++;
} else
psm_rails[i].mode = mode;
}
}
return fail_cnt ? (-EFAULT) : ret;
}
static ssize_t vdd_rstr_en_show(
struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled);
}
static ssize_t vdd_rstr_en_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = 0;
int i = 0;
uint8_t en_cnt = 0;
uint8_t dis_cnt = 0;
uint32_t val = 0;
struct kernel_param kp;
struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
mutex_lock(&vdd_rstr_mutex);
kp.arg = &val;
ret = param_set_bool(buf, &kp);
if (ret) {
pr_err("Invalid input %s for enabled\n", buf);
goto done_vdd_rstr_en;
}
if ((val == 0) && (en->enabled == 0))
goto done_vdd_rstr_en;
for (i = 0; i < rails_cnt; i++) {
if (rails[i].freq_req == 1 && freq_table_get)
ret = vdd_restriction_apply_freq(&rails[i],
(val) ? 0 : -1);
else
ret = vdd_restriction_apply_voltage(&rails[i],
(val) ? 0 : -1);
/*
* Even if we fail to set one rail, still try to set the
* others; continue the loop.
*/
if (ret)
pr_err("Set vdd restriction for %s failed\n",
rails[i].name);
else {
if (val)
en_cnt++;
else
dis_cnt++;
}
}
/* As long as one rail is enabled, vdd rstr is enabled */
if (val && en_cnt)
en->enabled = 1;
else if (!val && (dis_cnt == rails_cnt))
en->enabled = 0;
pr_debug("%s vdd restriction. curr: %d\n",
(val) ? "Enable" : "Disable", en->enabled);
done_vdd_rstr_en:
mutex_unlock(&vdd_rstr_mutex);
return count;
}
static int send_temperature_band(enum msm_thermal_phase_ctrl phase,
enum msm_temp_band req_band)
{
int ret = 0;
uint32_t msg_id;
struct msm_rpm_request *rpm_req;
unsigned int band = req_band;
uint32_t key, resource, resource_id;
if (phase < 0 || phase >= MSM_PHASE_CTRL_NR ||
req_band <= 0 || req_band >= MSM_TEMP_MAX_NR) {
pr_err("Invalid input\n");
ret = -EINVAL;
goto phase_ctrl_exit;
}
switch (phase) {
case MSM_CX_PHASE_CTRL:
key = msm_thermal_info.cx_phase_request_key;
break;
case MSM_GFX_PHASE_CTRL:
key = msm_thermal_info.gfx_phase_request_key;
break;
default:
goto phase_ctrl_exit;
break;
}
resource = msm_thermal_info.phase_rpm_resource_type;
resource_id = msm_thermal_info.phase_rpm_resource_id;
pr_debug("Sending %s temperature band %d\n",
(phase == MSM_CX_PHASE_CTRL) ? "CX" : "GFX",
req_band);
rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
resource, resource_id, 1);
if (!rpm_req) {
pr_err("Creating RPM request failed\n");
ret = -ENXIO;
goto phase_ctrl_exit;
}
ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&band,
(int)sizeof(band));
if (ret) {
pr_err("Adding KVP data failed. err:%d\n", ret);
goto free_rpm_handle;
}
msg_id = msm_rpm_send_request(rpm_req);
if (!msg_id) {
pr_err("RPM send request failed\n");
ret = -ENXIO;
goto free_rpm_handle;
}
ret = msm_rpm_wait_for_ack(msg_id);
if (ret) {
pr_err("RPM wait for ACK failed. err:%d\n", ret);
goto free_rpm_handle;
}
free_rpm_handle:
msm_rpm_free_request(rpm_req);
phase_ctrl_exit:
return ret;
}
static uint32_t msm_thermal_str_to_int(const char *inp)
{
int i, len;
uint32_t output = 0;
len = strnlen(inp, sizeof(uint32_t));
for (i = 0; i < len; i++)
output |= inp[i] << (i * 8);
return output;
}
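/*
 * Example: for the input string "cpu" (hypothetical), strnlen() caps the
 * length at sizeof(uint32_t) = 4, and the bytes are packed little-endian:
 * 'c' | 'p' << 8 | 'u' << 16 == 0x00757063.
 */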
static ssize_t sensor_info_show(
struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
int i;
ssize_t tot_size = 0, size = 0;
for (i = 0; i < sensor_cnt; i++) {
size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size,
"%s:%s:%s:%d ",
sensors[i].type, sensors[i].name,
sensors[i].alias ? : "",
sensors[i].scaling_factor);
if (tot_size + size >= PAGE_SIZE) {
pr_err("Not enough buffer size\n");
break;
}
tot_size += size;
}
if (tot_size)
buf[tot_size - 1] = '\n';
return tot_size;
}
static struct vdd_rstr_enable vdd_rstr_en = {
.ko_attr.attr.name = __stringify(enabled),
.ko_attr.attr.mode = 0644,
.ko_attr.show = vdd_rstr_en_show,
.ko_attr.store = vdd_rstr_en_store,
.enabled = 1,
};
static struct attribute *vdd_rstr_en_attribs[] = {
&vdd_rstr_en.ko_attr.attr,
NULL,
};
static struct attribute_group vdd_rstr_en_attribs_gp = {
.attrs = vdd_rstr_en_attribs,
};
static ssize_t vdd_rstr_reg_value_show(
struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
int val = 0;
struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr);
/* -1: disabled, -2: failed to get regulator handle */
if (reg->curr_level < 0)
val = reg->curr_level;
else
val = reg->levels[reg->curr_level];
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t vdd_rstr_reg_level_show(
struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level);
}
static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = 0;
int val = 0;
struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
mutex_lock(&vdd_rstr_mutex);
if (vdd_rstr_en.enabled == 0)
goto done_store_level;
ret = kstrtouint(buf, 10, &val);
if (ret) {
pr_err("Invalid input %s for level\n", buf);
goto done_store_level;
}
if (val < 0 || val > reg->num_levels - 1) {
pr_err(" Invalid number %d for level\n", val);
goto done_store_level;
}
if (val != reg->curr_level) {
if (reg->freq_req == 1 && freq_table_get)
update_cpu_min_freq_all(reg->levels[val]);
else {
ret = vdd_restriction_apply_voltage(reg, val);
if (ret) {
pr_err("Set vdd restriction for regulator %s failed. err:%d\n",
reg->name, ret);
goto done_store_level;
}
}
reg->curr_level = val;
pr_debug("Request level %d for %s\n",
reg->curr_level, reg->name);
}
done_store_level:
mutex_unlock(&vdd_rstr_mutex);
return count;
}
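/*
 * Vote for the maximum (MAX_CURRENT_UA) or minimum (0) load current
 * on the rail's phase regulator.
 */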
static int request_optimum_current(struct psm_rail *rail, enum ocr_request req)
{
int ret = 0;
if ((!rail) || (req >= OPTIMUM_CURRENT_NR) ||
(req < 0)) {
pr_err("Invalid input %d\n", req);
ret = -EINVAL;
goto request_ocr_exit;
}
ret = regulator_set_optimum_mode(rail->phase_reg,
(req == OPTIMUM_CURRENT_MAX) ? MAX_CURRENT_UA : 0);
if (ret < 0) {
pr_err("Optimum current request failed. err:%d\n", ret);
goto request_ocr_exit;
}
ret = 0; /*regulator_set_optimum_mode returns the mode on success*/
pr_debug("Requested optimum current mode: %d\n", req);
request_ocr_exit:
return ret;
}
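/*
 * Apply the requested optimum current mode to every OCR rail,
 * skipping rails that are already in that mode.
 */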
static int ocr_set_mode_all(enum ocr_request req)
{
int ret = 0, i;
for (i = 0; i < ocr_rail_cnt; i++) {
if (ocr_rails[i].mode == req)
continue;
ret = request_optimum_current(&ocr_rails[i], req);
if (ret)
goto ocr_set_mode_exit;
ocr_rails[i].mode = req;
}
ocr_set_mode_exit:
return ret;
}
static ssize_t ocr_reg_mode_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
}
static ssize_t ocr_reg_mode_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = 0;
int val = 0;
struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
if (!ocr_enabled)
return count;
mutex_lock(&ocr_mutex);
ret = kstrtoint(buf, 10, &val);
if (ret) {
pr_err("Invalid input %s for mode. err:%d\n",
buf, ret);
goto done_ocr_store;
}
if ((val != OPTIMUM_CURRENT_MAX) &&
(val != OPTIMUM_CURRENT_MIN)) {
pr_err("Invalid value %d for mode\n", val);
goto done_ocr_store;
}
if (val != reg->mode) {
ret = request_optimum_current(reg, val);
if (ret)
goto done_ocr_store;
reg->mode = val;
}
done_ocr_store:
mutex_unlock(&ocr_mutex);
return count;
}
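/*
 * Common sysfs store handler for the CX and GFX temperature band
 * requests: validate the band and forward it to the RPM only if it
 * differs from the currently requested one.
 */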
static ssize_t store_phase_request(const char *buf, size_t count, bool is_cx)
{
int ret = 0, val;
struct mutex *phase_mutex = (is_cx) ? (&cx_mutex) : (&gfx_mutex);
enum msm_thermal_phase_ctrl phase_req = (is_cx) ? MSM_CX_PHASE_CTRL :
MSM_GFX_PHASE_CTRL;
ret = kstrtoint(buf, 10, &val);
if (ret) {
pr_err("Invalid input %s for %s temperature band\n",
buf, (is_cx) ? "CX" : "GFX");
goto phase_store_exit;
}
if ((val <= 0) || (val >= MSM_TEMP_MAX_NR)) {
pr_err("Invalid input %d for %s temperature band\n",
val, (is_cx) ? "CX" : "GFX");
ret = -EINVAL;
goto phase_store_exit;
}
mutex_lock(phase_mutex);
if (val != ((is_cx) ? curr_cx_band : curr_gfx_band)) {
ret = send_temperature_band(phase_req, val);
if (!ret) {
*((is_cx) ? &curr_cx_band : &curr_gfx_band) = val;
} else {
pr_err("Failed to send %d temp. band to %s rail\n", val,
(is_cx) ? "CX" : "GFX");
goto phase_store_unlock_exit;
}
}
ret = count;
phase_store_unlock_exit:
mutex_unlock(phase_mutex);
phase_store_exit:
return ret;
}
#define show_phase(_name, _variable) \
static ssize_t _name##_phase_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
return snprintf(buf, PAGE_SIZE, "%u\n", _variable); \
}
#define store_phase(_name, _variable, _iscx) \
static ssize_t _name##_phase_store(struct kobject *kobj, \
struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
return store_phase_request(buf, count, _iscx); \
}
show_phase(gfx, curr_gfx_band)
show_phase(cx, curr_cx_band)
store_phase(gfx, curr_gfx_band, false)
store_phase(cx, curr_cx_band, true)
static ssize_t psm_reg_mode_show(
struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
}
static ssize_t psm_reg_mode_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = 0;
int val = 0;
struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
mutex_lock(&psm_mutex);
ret = kstrtoint(buf, 10, &val);
if (ret) {
pr_err("Invalid input %s for mode\n", buf);
goto done_psm_store;
}
if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
pr_err("Invalid number %d for mode\n", val);
goto done_psm_store;
}
if (val != reg->mode) {
ret = rpm_regulator_set_mode(reg->reg, val);
if (ret) {
pr_err("Fail to set Mode:%d for %s. err:%d\n",
val, reg->name, ret);
goto done_psm_store;
}
reg->mode = val;
}
done_psm_store:
mutex_unlock(&psm_mutex);
return count;
}
static int check_sensor_id(int sensor_id)
{
int i = 0;
bool hw_id_found = false;
int ret = 0;
for (i = 0; i < max_tsens_num; i++) {
if (sensor_id == tsens_id_map[i]) {
hw_id_found = true;
break;
}
}
if (!hw_id_found) {
pr_err("Invalid sensor hw id:%d\n", sensor_id);
return -EINVAL;
}
return ret;
}
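/*
 * Build the TSENS logical-to-hardware sensor id map. Sensors that do
 * not report a hardware id (-ENXIO) default to their sequence index.
 */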
static int create_sensor_id_map(void)
{
int i = 0;
int ret = 0;
tsens_id_map = kzalloc(sizeof(int) * max_tsens_num,
GFP_KERNEL);
if (!tsens_id_map) {
pr_err("Cannot allocate memory for tsens_id_map\n");
return -ENOMEM;
}
for (i = 0; i < max_tsens_num; i++) {
ret = tsens_get_hw_id_mapping(i, &tsens_id_map[i]);
/* If -ENXIO is returned, the hw_id defaults to the sequence index */
if (ret) {
if (ret == -ENXIO) {
tsens_id_map[i] = i;
ret = 0;
} else {
pr_err("Failed to get hw id for id:%d.err:%d\n",
i, ret);
goto fail;
}
}
}
return ret;
fail:
kfree(tsens_id_map);
return ret;
}
/* 1:enable, 0:disable */
static int vdd_restriction_apply_all(int en)
{
int i = 0;
int en_cnt = 0;
int dis_cnt = 0;
int fail_cnt = 0;
int ret = 0;
for (i = 0; i < rails_cnt; i++) {
if (rails[i].freq_req == 1)
if (freq_table_get)
ret = vdd_restriction_apply_freq(&rails[i],
en ? 0 : -1);
else
continue;
else
ret = vdd_restriction_apply_voltage(&rails[i],
en ? 0 : -1);
if (ret) {
pr_err("Failed to %s for %s. err:%d",
(en) ? "enable" : "disable",
rails[i].name, ret);
fail_cnt++;
} else {
if (en)
en_cnt++;
else
dis_cnt++;
}
}
/* As long as one rail is enabled, vdd rstr is enabled */
if (en && en_cnt)
vdd_rstr_en.enabled = 1;
else if (!en && (dis_cnt == rails_cnt))
vdd_rstr_en.enabled = 0;
/*
 * Check fail_cnt to determine whether the restriction was applied
 * successfully to all of the rails.
 */
if (fail_cnt)
return -EFAULT;
return ret;
}
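/* Program a trip threshold on the sensor and then enable it */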
static int set_and_activate_threshold(uint32_t sensor_id,
struct sensor_threshold *threshold)
{
int ret = 0;
ret = sensor_set_trip(sensor_id, threshold);
if (ret != 0) {
pr_err("sensor:%u Error in setting trip:%d. err:%d\n",
sensor_id, threshold->trip, ret);
goto set_done;
}
ret = sensor_activate_trip(sensor_id, threshold, true);
if (ret != 0) {
pr_err("sensor:%u Error in enabling trip:%d. err:%d\n",
sensor_id, threshold->trip, ret);
goto set_done;
}
set_done:
return ret;
}
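/*
 * Read a temperature either through the thermal zone framework or
 * directly from the TSENS driver, depending on the id type.
 */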
static int therm_get_temp(uint32_t id, enum sensor_id_type type, long *temp)
{
int ret = 0;
struct tsens_device tsens_dev;
if (!temp) {
pr_err("Invalid value\n");
ret = -EINVAL;
goto get_temp_exit;
}
switch (type) {
case THERM_ZONE_ID:
ret = sensor_get_temp(id, temp);
if (ret) {
pr_err("Unable to read thermal zone sensor:%d\n", id);
goto get_temp_exit;
}
break;
case THERM_TSENS_ID:
tsens_dev.sensor_num = id;
ret = tsens_get_temp(&tsens_dev, temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d\n",
tsens_dev.sensor_num);
goto get_temp_exit;
}
break;
default:
pr_err("Invalid type\n");
ret = -EINVAL;
goto get_temp_exit;
}
get_temp_exit:
return ret;
}
static int msm_thermal_panic_callback(struct notifier_block *nfb,
unsigned long event, void *data)
{
int i;
for (i = 0; i < max_tsens_num; i++)
therm_get_temp(tsens_id_map[i],
THERM_TSENS_ID,
&tsens_temp_at_panic[i]);
return NOTIFY_OK;
}
static struct notifier_block msm_thermal_panic_notifier = {
.notifier_call = msm_thermal_panic_callback,
};
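/*
 * Program both configurable trips of a thermal zone, arming the high
 * trip only if it is at or above, and the low trip only if it is at
 * or below, the current temperature.
 */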
static int set_threshold(uint32_t zone_id,
struct sensor_threshold *threshold)
{
int i = 0, ret = 0;
long temp;
if (!threshold) {
pr_err("Invalid input\n");
ret = -EINVAL;
goto set_threshold_exit;
}
ret = therm_get_temp(zone_id, THERM_ZONE_ID, &temp);
if (ret) {
pr_err("Unable to read temperature for zone:%d. err:%d\n",
zone_id, ret);
goto set_threshold_exit;
}
while (i < MAX_THRESHOLD) {
switch (threshold[i].trip) {
case THERMAL_TRIP_CONFIGURABLE_HI:
if (threshold[i].temp >= temp) {
ret = set_and_activate_threshold(zone_id,
&threshold[i]);
if (ret)
goto set_threshold_exit;
}
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
if (threshold[i].temp <= temp) {
ret = set_and_activate_threshold(zone_id,
&threshold[i]);
if (ret)
goto set_threshold_exit;
}
break;
default:
pr_err("zone:%u Invalid trip:%d\n", zone_id,
threshold[i].trip);
break;
}
i++;
}
set_threshold_exit:
return ret;
}
static int apply_vdd_mx_restriction(void)
{
int ret = 0;
if (mx_restr_applied)
goto done;
ret = regulator_set_voltage(vdd_mx, msm_thermal_info.vdd_mx_min,
INT_MAX);
if (ret) {
pr_err("Failed to add mx vote, error %d\n", ret);
goto done;
}
ret = regulator_enable(vdd_mx);
if (ret)
pr_err("Failed to vote for mx voltage %d, error %d\n",
msm_thermal_info.vdd_mx_min, ret);
else
mx_restr_applied = true;
done:
return ret;
}
static int remove_vdd_mx_restriction(void)
{
int ret = 0;
if (!mx_restr_applied)
goto done;
ret = regulator_disable(vdd_mx);
if (ret) {
pr_err("Failed to disable mx voting, error %d\n", ret);
goto done;
}
ret = regulator_set_voltage(vdd_mx, 0, INT_MAX);
if (ret)
pr_err("Failed to remove mx vote, error %d\n", ret);
else
mx_restr_applied = false;
done:
return ret;
}
static int do_vdd_mx(void)
{
long temp = 0;
int ret = 0;
int i = 0;
int dis_cnt = 0;
if (!vdd_mx_enabled)
return ret;
mutex_lock(&vdd_mx_mutex);
for (i = 0; i < thresh[MSM_VDD_MX_RESTRICTION].thresh_ct; i++) {
ret = therm_get_temp(
thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].sensor_id,
thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].id_type,
&temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d, err:%d\n",
thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].
sensor_id, ret);
dis_cnt++;
continue;
}
if (temp <= msm_thermal_info.vdd_mx_temp_degC) {
ret = apply_vdd_mx_restriction();
if (ret)
pr_err(
"Failed to apply mx restriction\n");
goto exit;
} else if (temp >= (msm_thermal_info.vdd_mx_temp_degC +
msm_thermal_info.vdd_mx_temp_hyst_degC)) {
dis_cnt++;
}
}
if ((dis_cnt == thresh[MSM_VDD_MX_RESTRICTION].thresh_ct)) {
ret = remove_vdd_mx_restriction();
if (ret)
pr_err("Failed to remove vdd mx restriction\n");
}
exit:
mutex_unlock(&vdd_mx_mutex);
return ret;
}
static void vdd_mx_notify(struct therm_threshold *trig_thresh)
{
static uint32_t mx_sens_status;
int ret;
pr_debug("Sensor%d trigger recevied for type %d\n",
trig_thresh->sensor_id,
trig_thresh->trip_triggered);
if (!vdd_mx_enabled)
return;
mutex_lock(&vdd_mx_mutex);
switch (trig_thresh->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_LOW:
mx_sens_status |= BIT(trig_thresh->sensor_id);
break;
case THERMAL_TRIP_CONFIGURABLE_HI:
if (mx_sens_status & BIT(trig_thresh->sensor_id))
mx_sens_status ^= BIT(trig_thresh->sensor_id);
break;
default:
pr_err("Unsupported trip type\n");
break;
}
if (mx_sens_status) {
ret = apply_vdd_mx_restriction();
if (ret)
pr_err("Failed to apply mx restriction\n");
} else if (!mx_sens_status) {
ret = remove_vdd_mx_restriction();
if (ret)
pr_err("Failed to remove vdd mx restriction\n");
}
mutex_unlock(&vdd_mx_mutex);
set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
}
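/*
 * Request a secure system reset through SCM once a TSENS sensor has
 * reached the thermal reset temperature.
 */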
static void msm_thermal_bite(int tsens_id, long temp)
{
struct scm_desc desc;
pr_err("TSENS:%d reached temperature:%ld. System reset\n",
tsens_id, temp);
if (!is_scm_armv8()) {
scm_call_atomic1(SCM_SVC_BOOT, THERM_SECURE_BITE_CMD, 0);
} else {
desc.args[0] = 0;
desc.arginfo = SCM_ARGS(1);
scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
THERM_SECURE_BITE_CMD), &desc);
}
}
static int do_therm_reset(void)
{
int ret = 0, i;
long temp = 0;
if (!therm_reset_enabled)
return ret;
for (i = 0; i < thresh[MSM_THERM_RESET].thresh_ct; i++) {
ret = therm_get_temp(
thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
thresh[MSM_THERM_RESET].thresh_list[i].id_type,
&temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d. err:%d\n",
thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
ret);
continue;
}
if (temp >= msm_thermal_info.therm_reset_temp_degC)
msm_thermal_bite(
thresh[MSM_THERM_RESET].thresh_list[i].sensor_id, temp);
}
return ret;
}
static void therm_reset_notify(struct therm_threshold *thresh_data)
{
long temp;
int ret = 0;
if (!therm_reset_enabled)
return;
if (!thresh_data) {
pr_err("Invalid input\n");
return;
}
switch (thresh_data->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_HI:
ret = therm_get_temp(thresh_data->sensor_id,
thresh_data->id_type, &temp);
if (ret)
pr_err("Unable to read TSENS sensor:%d. err:%d\n",
thresh_data->sensor_id, ret);
msm_thermal_bite(tsens_id_map[thresh_data->sensor_id],
temp);
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
break;
default:
pr_err("Invalid trip type\n");
break;
}
set_threshold(thresh_data->sensor_id, thresh_data->threshold);
}
#ifdef CONFIG_SMP
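/*
 * Polling-mode core control: offline one core per call once the core
 * limit temperature is reached, and allow cores back online after the
 * temperature drops below the hysteresis point.
 */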
static void __ref do_core_control(long temp)
{
int i = 0;
int ret = 0;
if (!core_control_enabled)
return;
mutex_lock(&core_control_mutex);
if (msm_thermal_info.core_control_mask &&
temp >= msm_thermal_info.core_limit_temp_degC) {
for (i = num_possible_cpus(); i > 0; i--) {
if (!(msm_thermal_info.core_control_mask & BIT(i)))
continue;
if (cpus_offlined & BIT(i) && !cpu_online(i))
continue;
pr_info("Set Offline: CPU%d Temp: %ld\n",
i, temp);
trace_thermal_pre_core_offline(i);
ret = cpu_down(i);
if (ret)
pr_err("Error %d offline core %d\n",
ret, i);
trace_thermal_post_core_offline(i,
cpumask_test_cpu(i, cpu_online_mask));
cpus_offlined |= BIT(i);
break;
}
} else if (msm_thermal_info.core_control_mask && cpus_offlined &&
temp <= (msm_thermal_info.core_limit_temp_degC -
msm_thermal_info.core_temp_hysteresis_degC)) {
for (i = 0; i < num_possible_cpus(); i++) {
if (!(cpus_offlined & BIT(i)))
continue;
cpus_offlined &= ~BIT(i);
pr_info("Allow Online CPU%d Temp: %ld\n",
i, temp);
/*
* If this core is already online, then bring up the
* next offlined core.
*/
if (cpu_online(i))
continue;
trace_thermal_pre_core_online(i);
ret = cpu_up(i);
if (ret)
pr_err("Error %d online core %d\n",
ret, i);
trace_thermal_post_core_online(i,
cpumask_test_cpu(i, cpu_online_mask));
break;
}
}
mutex_unlock(&core_control_mutex);
}
/* Call with core_control_mutex locked */
static int __ref update_offline_cores(int val)
{
uint32_t cpu = 0;
int ret = 0;
uint32_t previous_cpus_offlined = 0;
if (!core_control_enabled)
return 0;
previous_cpus_offlined = cpus_offlined;
cpus_offlined = msm_thermal_info.core_control_mask & val;
for_each_possible_cpu(cpu) {
if (cpus_offlined & BIT(cpu)) {
if (!cpu_online(cpu))
continue;
trace_thermal_pre_core_offline(cpu);
ret = cpu_down(cpu);
if (ret)
pr_err("Unable to offline CPU%d. err:%d\n",
cpu, ret);
else
pr_debug("Offlined CPU%d\n", cpu);
trace_thermal_post_core_offline(cpu,
cpumask_test_cpu(cpu, cpu_online_mask));
} else if (online_core && (previous_cpus_offlined & BIT(cpu))) {
if (cpu_online(cpu))
continue;
trace_thermal_pre_core_online(cpu);
ret = cpu_up(cpu);
if (ret && ret == notifier_to_errno(NOTIFY_BAD))
pr_debug("Onlining CPU%d is vetoed\n", cpu);
else if (ret)
pr_err("Unable to online CPU%d. err:%d\n",
cpu, ret);
else
pr_debug("Onlined CPU%d\n", cpu);
trace_thermal_post_core_online(cpu,
cpumask_test_cpu(cpu, cpu_online_mask));
}
}
return ret;
}
static __ref int do_hotplug(void *data)
{
int ret = 0;
uint32_t cpu = 0, mask = 0;
struct device_clnt_data *clnt = NULL;
struct sched_param param = {.sched_priority = MAX_RT_PRIO-2};
if (!core_control_enabled) {
pr_debug("Core control disabled\n");
return -EINVAL;
}
sched_setscheduler(current, SCHED_FIFO, &param);
while (!kthread_should_stop()) {
while (wait_for_completion_interruptible(
&hotplug_notify_complete) != 0)
;
INIT_COMPLETION(hotplug_notify_complete);
mask = 0;
mutex_lock(&core_control_mutex);
for_each_possible_cpu(cpu) {
if (hotplug_enabled &&
cpus[cpu].hotplug_thresh_clear) {
set_threshold(cpus[cpu].sensor_id,
&cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);
cpus[cpu].hotplug_thresh_clear = false;
}
if (cpus[cpu].offline || cpus[cpu].user_offline)
mask |= BIT(cpu);
}
if (devices && devices->hotplug_dev) {
mutex_lock(&devices->hotplug_dev->clnt_lock);
for_each_cpu_mask(cpu,
devices->hotplug_dev->active_req.offline_mask)
mask |= BIT(cpu);
mutex_unlock(&devices->hotplug_dev->clnt_lock);
}
if (mask != cpus_offlined)
update_offline_cores(mask);
mutex_unlock(&core_control_mutex);
if (devices && devices->hotplug_dev) {
union device_request req;
req.offline_mask = CPU_MASK_NONE;
mutex_lock(&devices->hotplug_dev->clnt_lock);
for_each_cpu_mask(cpu,
devices->hotplug_dev->active_req.offline_mask)
if (mask & BIT(cpu))
cpumask_test_and_set_cpu(cpu,
&req.offline_mask);
list_for_each_entry(clnt,
&devices->hotplug_dev->client_list,
clnt_ptr) {
if (clnt->callback)
clnt->callback(clnt, &req,
clnt->usr_data);
}
mutex_unlock(&devices->hotplug_dev->clnt_lock);
}
sysfs_notify(cc_kobj, NULL, "cpus_offlined");
}
return ret;
}
#else
static void __ref do_core_control(long temp)
{
return;
}
static __ref int do_hotplug(void *data)
{
return 0;
}
static int __ref update_offline_cores(int val)
{
return 0;
}
#endif
static int do_gfx_phase_cond(void)
{
long temp = 0;
int ret = 0;
uint32_t new_req_band = curr_gfx_band;
if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
return ret;
mutex_lock(&gfx_mutex);
if (gfx_warm_phase_ctrl_enabled) {
ret = therm_get_temp(
thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id,
thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->id_type,
&temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d. err:%d\n",
thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id,
ret);
goto gfx_phase_cond_exit;
}
} else {
ret = therm_get_temp(
thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id,
thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->id_type,
&temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d. err:%d\n",
thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id,
ret);
goto gfx_phase_cond_exit;
}
}
switch (curr_gfx_band) {
case MSM_HOT_CRITICAL:
if (temp < (msm_thermal_info.gfx_phase_hot_temp_degC -
msm_thermal_info.gfx_phase_hot_temp_hyst_degC))
new_req_band = MSM_WARM;
break;
case MSM_WARM:
if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC)
new_req_band = MSM_HOT_CRITICAL;
else if (temp < (msm_thermal_info.gfx_phase_warm_temp_degC -
msm_thermal_info.gfx_phase_warm_temp_hyst_degC))
new_req_band = MSM_NORMAL;
break;
case MSM_NORMAL:
if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC)
new_req_band = MSM_WARM;
break;
default:
if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC)
new_req_band = MSM_HOT_CRITICAL;
else if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC)
new_req_band = MSM_WARM;
else
new_req_band = MSM_NORMAL;
break;
}
if (new_req_band != curr_gfx_band) {
ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band);
if (!ret) {
pr_debug("Reached %d band. Temp:%ld\n", new_req_band,
temp);
curr_gfx_band = new_req_band;
} else {
pr_err("Error sending temp. band:%d. Temp:%ld. err:%d",
new_req_band, temp, ret);
}
}
gfx_phase_cond_exit:
mutex_unlock(&gfx_mutex);
return ret;
}
static int do_cx_phase_cond(void)
{
long temp = 0;
int i, ret = 0, dis_cnt = 0;
if (!cx_phase_ctrl_enabled)
return ret;
mutex_lock(&cx_mutex);
for (i = 0; i < thresh[MSM_CX_PHASE_CTRL_HOT].thresh_ct; i++) {
ret = therm_get_temp(
thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id,
thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].id_type,
&temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d. err:%d\n",
thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id,
ret);
dis_cnt++;
continue;
}
if (temp >= msm_thermal_info.cx_phase_hot_temp_degC) {
if (curr_cx_band != MSM_HOT_CRITICAL) {
ret = send_temperature_band(MSM_CX_PHASE_CTRL,
MSM_HOT_CRITICAL);
if (!ret) {
pr_debug("band:HOT_CRITICAL Temp:%ld\n",
temp);
curr_cx_band = MSM_HOT_CRITICAL;
} else {
pr_err("Error %d sending HOT_CRITICAL",
ret);
}
}
goto cx_phase_cond_exit;
} else if (temp < (msm_thermal_info.cx_phase_hot_temp_degC -
msm_thermal_info.cx_phase_hot_temp_hyst_degC))
dis_cnt++;
}
if (dis_cnt == max_tsens_num && curr_cx_band != MSM_WARM) {
ret = send_temperature_band(MSM_CX_PHASE_CTRL, MSM_WARM);
if (!ret) {
pr_debug("band:WARM Temp:%ld\n", temp);
curr_cx_band = MSM_WARM;
} else {
pr_err("Error sending WARM temp band. err:%d",
ret);
}
}
cx_phase_cond_exit:
mutex_unlock(&cx_mutex);
return ret;
}
static int do_ocr(void)
{
long temp = 0;
int ret = 0;
int i = 0, j = 0;
int pfm_cnt = 0;
if (!ocr_enabled)
return ret;
mutex_lock(&ocr_mutex);
for (i = 0; i < thresh[MSM_OCR].thresh_ct; i++) {
ret = therm_get_temp(
thresh[MSM_OCR].thresh_list[i].sensor_id,
thresh[MSM_OCR].thresh_list[i].id_type,
&temp);
if (ret) {
pr_err("Unable to read TSENS sensor %d. err:%d\n",
thresh[MSM_OCR].thresh_list[i].sensor_id,
ret);
pfm_cnt++;
continue;
}
if (temp > msm_thermal_info.ocr_temp_degC) {
if (ocr_rails[0].init != OPTIMUM_CURRENT_NR)
for (j = 0; j < ocr_rail_cnt; j++)
ocr_rails[j].init = OPTIMUM_CURRENT_NR;
ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
if (ret)
pr_err("Error setting max ocr. err:%d\n",
ret);
else
pr_debug("Requested MAX OCR. tsens:%d Temp:%ld",
thresh[MSM_OCR].thresh_list[i].sensor_id, temp);
goto do_ocr_exit;
} else if (temp <= (msm_thermal_info.ocr_temp_degC -
msm_thermal_info.ocr_temp_hyst_degC))
pfm_cnt++;
}
if (pfm_cnt == thresh[MSM_OCR].thresh_ct ||
ocr_rails[0].init != OPTIMUM_CURRENT_NR) {
/*
 * 'init' not equal to OPTIMUM_CURRENT_NR means this is the
 * first polling iteration after device probe. During the first
 * iteration, if the temperature is below the set point, clear
 * the max current request and reset 'init'.
 */
if (ocr_rails[0].init != OPTIMUM_CURRENT_NR)
for (j = 0; j < ocr_rail_cnt; j++)
ocr_rails[j].init = OPTIMUM_CURRENT_NR;
ret = ocr_set_mode_all(OPTIMUM_CURRENT_MIN);
if (ret) {
pr_err("Error setting min ocr. err:%d\n",
ret);
goto do_ocr_exit;
} else {
pr_debug("Requested MIN OCR. Temp:%ld", temp);
}
}
do_ocr_exit:
mutex_unlock(&ocr_mutex);
return ret;
}
static int do_vdd_restriction(void)
{
long temp = 0;
int ret = 0;
int i = 0;
int dis_cnt = 0;
if (!vdd_rstr_enabled)
return ret;
if (usefreq && !freq_table_get) {
if (check_freq_table() && !core_ptr)
return ret;
}
mutex_lock(&vdd_rstr_mutex);
for (i = 0; i < thresh[MSM_VDD_RESTRICTION].thresh_ct; i++) {
ret = therm_get_temp(
thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
thresh[MSM_VDD_RESTRICTION].thresh_list[i].id_type,
&temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d. err:%d\n",
thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
ret);
dis_cnt++;
continue;
}
if (temp <= msm_thermal_info.vdd_rstr_temp_degC) {
ret = vdd_restriction_apply_all(1);
if (ret) {
pr_err("Enable vdd rstr for all failed. err:%d\n", ret);
goto exit;
}
pr_debug("Enabled Vdd Restriction tsens:%d. Temp:%ld\n",
thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
temp);
goto exit;
} else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC)
dis_cnt++;
}
if (dis_cnt == max_tsens_num) {
ret = vdd_restriction_apply_all(0);
if (ret) {
pr_err("Disable vdd rstr for all failed. err:%d\n",
ret);
goto exit;
}
pr_debug("Disabled Vdd Restriction\n");
}
exit:
mutex_unlock(&vdd_rstr_mutex);
return ret;
}
static int do_psm(void)
{
long temp = 0;
int ret = 0;
int i = 0;
int auto_cnt = 0;
mutex_lock(&psm_mutex);
for (i = 0; i < max_tsens_num; i++) {
ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d. err:%d\n",
tsens_id_map[i], ret);
auto_cnt++;
continue;
}
/*
 * As long as one sensor is above the threshold, set PWM mode
 * on all rails and stop the loop. Set auto mode only when all
 * sensors are below the threshold.
 */
if (temp > msm_thermal_info.psm_temp_degC) {
ret = psm_set_mode_all(PMIC_PWM_MODE);
if (ret) {
pr_err("Set pwm mode for all failed. err:%d\n",
ret);
goto exit;
}
pr_debug("Requested PMIC PWM Mode tsens:%d. Temp:%ld\n",
tsens_id_map[i], temp);
break;
} else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
auto_cnt++;
}
if (auto_cnt == max_tsens_num) {
ret = psm_set_mode_all(PMIC_AUTO_MODE);
if (ret) {
pr_err("Set auto mode for all failed. err:%d\n", ret);
goto exit;
}
pr_debug("Requested PMIC AUTO Mode\n");
}
exit:
mutex_unlock(&psm_mutex);
return ret;
}
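/*
 * Step the bootup frequency limit index down when the temperature
 * crosses the threshold and back up once it drops below the hysteresis
 * point, then apply the resulting maximum frequency to every CPU in
 * the bootup frequency control mask.
 */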
static void do_freq_control(long temp)
{
uint32_t cpu = 0;
uint32_t max_freq = cpus[cpu].limited_max_freq;
if (core_ptr)
return do_cluster_freq_ctrl(temp);
if (!freq_table_get)
return;
if (temp >= temp_threshold) {
if (limit_idx == limit_idx_low)
return;
limit_idx -= msm_thermal_info.bootup_freq_step;
if (limit_idx < limit_idx_low)
limit_idx = limit_idx_low;
max_freq = table[limit_idx].frequency;
} else if (temp < temp_threshold - msm_thermal_info.temp_hysteresis_degC) {
if (limit_idx == limit_idx_high)
return;
limit_idx += msm_thermal_info.bootup_freq_step;
if (limit_idx >= limit_idx_high) {
limit_idx = limit_idx_high;
max_freq = UINT_MAX;
} else
max_freq = table[limit_idx].frequency;
}
if (max_freq == cpus[cpu].limited_max_freq)
return;
/* Update new limits */
get_online_cpus();
for_each_possible_cpu(cpu) {
if (!(msm_thermal_info.bootup_freq_control_mask & BIT(cpu)))
continue;
pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n",
cpu, max_freq, temp);
cpus[cpu].limited_max_freq = max_freq;
if (!SYNC_CORE(cpu))
update_cpu_freq(cpu);
}
update_cluster_freq();
put_online_cpus();
}
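/*
 * Periodic polling work: run the thermal reset check, read the primary
 * sensor, and invoke each mitigation routine before rescheduling.
 */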
static void check_temp(struct work_struct *work)
{
long temp = 0;
int ret = 0;
do_therm_reset();
ret = therm_get_temp(msm_thermal_info.sensor_id, THERM_TSENS_ID, &temp);
if (ret) {
pr_err("Unable to read TSENS sensor:%d. err:%d\n",
msm_thermal_info.sensor_id, ret);
goto reschedule;
}
do_core_control(temp);
do_vdd_mx();
do_psm();
do_gfx_phase_cond();
do_cx_phase_cond();
do_ocr();
/*
 * All mitigation involving CPU frequency should be placed below
 * this check. The mitigation following this frequency table check
 * should be able to handle the failure case.
 */
if (!freq_table_get)
check_freq_table();
do_vdd_restriction();
do_freq_control(temp);
reschedule:
if (polling_enabled)
schedule_delayed_work(&check_temp_work, msecs_to_jiffies(POLLING_DELAY));
}
static int __ref msm_thermal_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
uint32_t cpu = (uintptr_t)hcpu;
if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
if (core_control_enabled &&
(msm_thermal_info.core_control_mask & BIT(cpu)) &&
(cpus_offlined & BIT(cpu))) {
pr_debug("Preventing CPU%d from coming online.\n",
cpu);
return NOTIFY_BAD;
}
}
pr_debug("voting for CPU%d to be online\n", cpu);
return NOTIFY_OK;
}
static struct notifier_block __refdata msm_thermal_cpu_notifier = {
.notifier_call = msm_thermal_cpu_callback,
};
static int hotplug_notify(enum thermal_trip_type type, int temp, void *data)
{
struct cpu_info *cpu_node = (struct cpu_info *)data;
pr_info("%s reach temp threshold: %d\n", cpu_node->sensor_type, temp);
if (!(msm_thermal_info.core_control_mask & BIT(cpu_node->cpu)))
return 0;
switch (type) {
case THERMAL_TRIP_CONFIGURABLE_HI:
if (!(cpu_node->offline))
cpu_node->offline = 1;
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
if (cpu_node->offline)
cpu_node->offline = 0;
break;
default:
break;
}
if (hotplug_task) {
cpu_node->hotplug_thresh_clear = true;
complete(&hotplug_notify_complete);
} else
pr_err("Hotplug task is not initialized\n");
return 0;
}
/* Adjust cpus offlined bit based on temperature reading. */
static int hotplug_init_cpu_offlined(void)
{
long temp = 0;
uint32_t cpu = 0;
if (!hotplug_enabled)
return 0;
mutex_lock(&core_control_mutex);
for_each_possible_cpu(cpu) {
if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
continue;
if (therm_get_temp(cpus[cpu].sensor_id, cpus[cpu].id_type,
&temp)) {
pr_err("Unable to read TSENS sensor:%d.\n",
cpus[cpu].sensor_id);
mutex_unlock(&core_control_mutex);
return -EINVAL;
}
if (temp >= msm_thermal_info.hotplug_temp_degC)
cpus[cpu].offline = 1;
else if (temp <= (msm_thermal_info.hotplug_temp_degC -
msm_thermal_info.hotplug_temp_hysteresis_degC))
cpus[cpu].offline = 0;
}
mutex_unlock(&core_control_mutex);
if (hotplug_task)
complete(&hotplug_notify_complete);
else {
pr_err("Hotplug task is not initialized\n");
return -EINVAL;
}
return 0;
}
static void hotplug_init(void)
{
uint32_t cpu = 0;
struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;
if (hotplug_task)
return;
if (!hotplug_enabled)
goto init_kthread;
for_each_possible_cpu(cpu) {
cpus[cpu].sensor_id =
sensor_get_id((char *)cpus[cpu].sensor_type);
cpus[cpu].id_type = THERM_ZONE_ID;
if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
continue;
hi_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH];
low_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_LOW];
hi_thresh->temp = msm_thermal_info.hotplug_temp_degC;
hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
low_thresh->temp = msm_thermal_info.hotplug_temp_degC -
msm_thermal_info.hotplug_temp_hysteresis_degC;
low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
hi_thresh->notify = low_thresh->notify = hotplug_notify;
hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];
set_threshold(cpus[cpu].sensor_id, hi_thresh);
}
init_kthread:
init_completion(&hotplug_notify_complete);
hotplug_task = kthread_run(do_hotplug, NULL, "msm_thermal:hotplug");
if (IS_ERR(hotplug_task)) {
pr_err("Failed to create do_hotplug thread. err:%ld\n",
PTR_ERR(hotplug_task));
return;
}
/*
* Adjust the cpus offlined bit when hotplug initializes so that the new
* cpus offlined state is based on the hotplug threshold range
*/
if (hotplug_init_cpu_offlined())
kthread_stop(hotplug_task);
}
static __ref int do_freq_mitigation(void *data)
{
int ret = 0;
uint32_t cpu = 0, max_freq_req = 0, min_freq_req = 0;
struct sched_param param = {.sched_priority = MAX_RT_PRIO-1};
struct device_clnt_data *clnt = NULL;
struct device_manager_data *cpu_dev = NULL;
sched_setscheduler(current, SCHED_FIFO, &param);
while (!kthread_should_stop()) {
while (wait_for_completion_interruptible(
&freq_mitigation_complete) != 0)
;
INIT_COMPLETION(freq_mitigation_complete);
for_each_possible_cpu(cpu) {
max_freq_req = (cpus[cpu].max_freq) ?
msm_thermal_info.freq_limit :
UINT_MAX;
max_freq_req = min(max_freq_req,
cpus[cpu].user_max_freq);
min_freq_req = max(min_freq_limit,
cpus[cpu].user_min_freq);
if (devices && devices->cpufreq_dev[cpu]) {
cpu_dev = devices->cpufreq_dev[cpu];
mutex_lock(&cpu_dev->clnt_lock);
max_freq_req = min(max_freq_req,
cpu_dev->active_req.freq.max_freq);
min_freq_req = max(min_freq_req,
cpu_dev->active_req.freq.min_freq);
mutex_unlock(&cpu_dev->clnt_lock);
}
if ((max_freq_req == cpus[cpu].limited_max_freq)
&& (min_freq_req ==
cpus[cpu].limited_min_freq))
goto reset_threshold;
cpus[cpu].limited_max_freq = max_freq_req;
cpus[cpu].limited_min_freq = min_freq_req;
if (!SYNC_CORE(cpu))
update_cpu_freq(cpu);
reset_threshold:
if (!SYNC_CORE(cpu) &&
devices && devices->cpufreq_dev[cpu]) {
union device_request req;
req.freq.max_freq = max_freq_req;
req.freq.min_freq = min_freq_req;
cpu_dev = devices->cpufreq_dev[cpu];
mutex_lock(&cpu_dev->clnt_lock);
list_for_each_entry(clnt,
&cpu_dev->client_list,
clnt_ptr) {
if (clnt->callback)
clnt->callback(clnt,
&req,
clnt->usr_data);
}
mutex_unlock(&cpu_dev->clnt_lock);
}
if (freq_mitigation_enabled &&
cpus[cpu].freq_thresh_clear) {
set_threshold(cpus[cpu].sensor_id,
&cpus[cpu].threshold[FREQ_THRESHOLD_HIGH]);
cpus[cpu].freq_thresh_clear = false;
}
}
update_cluster_freq();
}
return ret;
}
static int freq_mitigation_notify(enum thermal_trip_type type,
int temp, void *data)
{
struct cpu_info *cpu_node = (struct cpu_info *) data;
pr_debug("%s reached temp threshold: %d\n",
cpu_node->sensor_type, temp);
if (!(msm_thermal_info.freq_mitig_control_mask &
BIT(cpu_node->cpu)))
return 0;
switch (type) {
case THERMAL_TRIP_CONFIGURABLE_HI:
if (!cpu_node->max_freq) {
pr_info("Mitigating CPU%d frequency to %d\n",
cpu_node->cpu,
msm_thermal_info.freq_limit);
cpu_node->max_freq = true;
}
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
if (cpu_node->max_freq) {
pr_info("Removing frequency mitigation for CPU%d\n",
cpu_node->cpu);
cpu_node->max_freq = false;
}
break;
default:
break;
}
if (freq_mitigation_task) {
cpu_node->freq_thresh_clear = true;
complete(&freq_mitigation_complete);
} else {
pr_err("Frequency mitigation task is not initialized\n");
}
return 0;
}
static void freq_mitigation_init(void)
{
uint32_t cpu = 0;
struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;
if (freq_mitigation_task)
return;
if (!freq_mitigation_enabled)
goto init_freq_thread;
for_each_possible_cpu(cpu) {
if (!(msm_thermal_info.freq_mitig_control_mask & BIT(cpu)))
continue;
hi_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_HIGH];
low_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_LOW];
hi_thresh->temp = msm_thermal_info.freq_mitig_temp_degc;
hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
low_thresh->temp = msm_thermal_info.freq_mitig_temp_degc -
msm_thermal_info.freq_mitig_temp_hysteresis_degc;
low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
hi_thresh->notify = low_thresh->notify =
freq_mitigation_notify;
hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];
set_threshold(cpus[cpu].sensor_id, hi_thresh);
}
init_freq_thread:
init_completion(&freq_mitigation_complete);
freq_mitigation_task = kthread_run(do_freq_mitigation, NULL,
"msm_thermal:freq_mitig");
if (IS_ERR(freq_mitigation_task)) {
pr_err("Failed to create frequency mitigation thread. err:%ld\n",
PTR_ERR(freq_mitigation_task));
return;
}
}
int msm_thermal_get_freq_plan_size(uint32_t cluster, unsigned int *table_len)
{
uint32_t i = 0;
struct cluster_info *cluster_ptr = NULL;
if (!core_ptr) {
pr_err("Topology ptr not initialized\n");
return -ENODEV;
}
if (!table_len) {
pr_err("Invalid input\n");
return -EINVAL;
}
if (!freq_table_get)
check_freq_table();
for (; i < core_ptr->entity_count; i++) {
cluster_ptr = &core_ptr->child_entity_ptr[i];
if (cluster_ptr->cluster_id == cluster) {
if (!cluster_ptr->freq_table) {
pr_err("Cluster%d clock plan not initialized\n",
cluster);
return -EINVAL;
}
*table_len = cluster_ptr->freq_idx_high + 1;
return 0;
}
}
pr_err("Invalid cluster ID:%d\n", cluster);
return -EINVAL;
}
int msm_thermal_get_cluster_freq_plan(uint32_t cluster, unsigned int *table_ptr)
{
uint32_t i = 0;
struct cluster_info *cluster_ptr = NULL;
if (!core_ptr) {
pr_err("Topology ptr not initialized\n");
return -ENODEV;
}
if (!table_ptr) {
pr_err("Invalid input\n");
return -EINVAL;
}
if (!freq_table_get)
check_freq_table();
for (; i < core_ptr->entity_count; i++) {
cluster_ptr = &core_ptr->child_entity_ptr[i];
if (cluster_ptr->cluster_id == cluster)
break;
}
if (i == core_ptr->entity_count) {
pr_err("Invalid cluster ID:%d\n", cluster);
return -EINVAL;
}
if (!cluster_ptr->freq_table) {
pr_err("Cluster%d clock plan not initialized\n", cluster);
return -EINVAL;
}
for (i = 0; i <= cluster_ptr->freq_idx_high; i++)
table_ptr[i] = cluster_ptr->freq_table[i].frequency;
return 0;
}
int msm_thermal_set_cluster_freq(uint32_t cluster, uint32_t freq, bool is_max)
{
int ret = 0;
uint32_t i = 0;
struct cluster_info *cluster_ptr = NULL;
bool notify = false;
if (!core_ptr) {
pr_err("Topology ptr not initialized\n");
return -ENODEV;
}
for (; i < core_ptr->entity_count; i++) {
cluster_ptr = &core_ptr->child_entity_ptr[i];
if (cluster_ptr->cluster_id != cluster)
continue;
if (!cluster_ptr->sync_cluster) {
pr_err("Cluster%d is not synchronous\n", cluster);
return -EINVAL;
} else {
pr_debug("Update Cluster%d %s frequency to %d\n",
cluster, (is_max) ? "max" : "min", freq);
break;
}
}
if (i == core_ptr->entity_count) {
pr_err("Invalid cluster ID:%d\n", cluster);
return -EINVAL;
}
for_each_cpu_mask(i, cluster_ptr->cluster_cores) {
uint32_t *freq_ptr = (is_max) ? &cpus[i].user_max_freq
: &cpus[i].user_min_freq;
if (*freq_ptr == freq)
continue;
notify = true;
*freq_ptr = freq;
}
if (freq_mitigation_task) {
if (notify)
complete(&freq_mitigation_complete);
} else {
pr_err("Frequency mitigation task is not initialized\n");
return -ESRCH;
}
return ret;
}
int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq, bool is_max)
{
int ret = 0;
if (cpu >= num_possible_cpus()) {
pr_err("Invalid input\n");
ret = -EINVAL;
goto set_freq_exit;
}
pr_debug("Userspace requested %s frequency %u for CPU%u\n",
(is_max) ? "Max" : "Min", freq, cpu);
if (is_max) {
if (cpus[cpu].user_max_freq == freq)
goto set_freq_exit;
cpus[cpu].user_max_freq = freq;
} else {
if (cpus[cpu].user_min_freq == freq)
goto set_freq_exit;
cpus[cpu].user_min_freq = freq;
}
if (freq_mitigation_task) {
complete(&freq_mitigation_complete);
} else {
pr_err("Frequency mitigation task is not initialized\n");
ret = -ESRCH;
goto set_freq_exit;
}
set_freq_exit:
return ret;
}
int therm_set_threshold(struct threshold_info *thresh_inp)
{
int ret = 0, i = 0, err = 0;
struct therm_threshold *thresh_ptr;
if (!thresh_inp) {
pr_err("Invalid input\n");
ret = -EINVAL;
goto therm_set_exit;
}
thresh_inp->thresh_triggered = false;
for (i = 0; i < thresh_inp->thresh_ct; i++) {
thresh_ptr = &thresh_inp->thresh_list[i];
thresh_ptr->trip_triggered = -1;
err = set_threshold(thresh_ptr->sensor_id,
thresh_ptr->threshold);
if (err) {
ret = err;
err = 0;
}
}
therm_set_exit:
return ret;
}
static void cx_phase_ctrl_notify(struct therm_threshold *trig_thresh)
{
static uint32_t cx_sens_status;
int ret = 0;
if (!cx_phase_ctrl_enabled)
return;
if (trig_thresh->trip_triggered < 0)
goto cx_phase_ctrl_exit;
mutex_lock(&cx_mutex);
pr_debug("sensor:%d reached %s thresh for CX\n",
tsens_id_map[trig_thresh->sensor_id],
(trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI) ?
"hot critical" : "warm");
switch (trig_thresh->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_HI:
cx_sens_status |= BIT(trig_thresh->sensor_id);
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
if (cx_sens_status & BIT(trig_thresh->sensor_id))
cx_sens_status ^= BIT(trig_thresh->sensor_id);
break;
default:
pr_err("Unsupported trip type\n");
goto cx_phase_unlock_exit;
break;
}
if ((cx_sens_status && (curr_cx_band == MSM_HOT_CRITICAL)) ||
(!cx_sens_status && (curr_cx_band == MSM_WARM)))
goto cx_phase_unlock_exit;
ret = send_temperature_band(MSM_CX_PHASE_CTRL, (cx_sens_status) ?
MSM_HOT_CRITICAL : MSM_WARM);
if (!ret)
curr_cx_band = (cx_sens_status) ? MSM_HOT_CRITICAL : MSM_WARM;
cx_phase_unlock_exit:
mutex_unlock(&cx_mutex);
cx_phase_ctrl_exit:
set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
return;
}
static void gfx_phase_ctrl_notify(struct therm_threshold *trig_thresh)
{
uint32_t new_req_band = curr_gfx_band;
int ret = 0;
if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
return;
if (trig_thresh->trip_triggered < 0)
goto gfx_phase_ctrl_exit;
mutex_lock(&gfx_mutex);
if (gfx_crit_phase_ctrl_enabled) {
switch (
thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_HI:
new_req_band = MSM_HOT_CRITICAL;
pr_debug(
"sensor:%d reached hot critical thresh for GFX\n",
tsens_id_map[trig_thresh->sensor_id]);
goto notify_new_band;
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
new_req_band = MSM_WARM;
pr_debug("sensor:%d reached warm thresh for GFX\n",
tsens_id_map[trig_thresh->sensor_id]);
goto notify_new_band;
break;
default:
break;
}
}
if (gfx_warm_phase_ctrl_enabled) {
switch (
thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_HI:
new_req_band = MSM_WARM;
pr_debug("sensor:%d reached warm thresh for GFX\n",
tsens_id_map[trig_thresh->sensor_id]);
goto notify_new_band;
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
new_req_band = MSM_NORMAL;
pr_debug("sensor:%d reached normal thresh for GFX\n",
tsens_id_map[trig_thresh->sensor_id]);
goto notify_new_band;
break;
default:
break;
}
}
notify_new_band:
if (new_req_band != curr_gfx_band) {
ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band);
if (!ret)
curr_gfx_band = new_req_band;
}
mutex_unlock(&gfx_mutex);
gfx_phase_ctrl_exit:
switch (curr_gfx_band) {
case MSM_HOT_CRITICAL:
if (gfx_crit_phase_ctrl_enabled)
therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
break;
case MSM_NORMAL:
if (gfx_warm_phase_ctrl_enabled)
therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
break;
case MSM_WARM:
default:
if (gfx_crit_phase_ctrl_enabled)
therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
if (gfx_warm_phase_ctrl_enabled)
therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
break;
}
return;
}
static void vdd_restriction_notify(struct therm_threshold *trig_thresh)
{
int ret = 0;
static uint32_t vdd_sens_status;
if (!vdd_rstr_enabled)
return;
if (!trig_thresh) {
pr_err("Invalid input\n");
return;
}
if (trig_thresh->trip_triggered < 0)
goto set_and_exit;
mutex_lock(&vdd_rstr_mutex);
pr_debug("sensor:%d reached %s thresh for Vdd restriction\n",
tsens_id_map[trig_thresh->sensor_id],
(trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI) ?
"high" : "low");
switch (trig_thresh->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_HI:
if (vdd_sens_status & BIT(trig_thresh->sensor_id))
vdd_sens_status ^= BIT(trig_thresh->sensor_id);
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
vdd_sens_status |= BIT(trig_thresh->sensor_id);
break;
default:
pr_err("Unsupported trip type\n");
goto unlock_and_exit;
break;
}
ret = vdd_restriction_apply_all((vdd_sens_status) ? 1 : 0);
if (ret) {
pr_err("%s vdd rstr votlage for all failed\n",
(vdd_sens_status) ?
"Enable" : "Disable");
goto unlock_and_exit;
}
unlock_and_exit:
mutex_unlock(&vdd_rstr_mutex);
set_and_exit:
set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
return;
}
static void ocr_notify(struct therm_threshold *trig_thresh)
{
int ret = 0;
static uint32_t ocr_sens_status;
if (!ocr_enabled)
return;
if (!trig_thresh) {
pr_err("Invalid input\n");
return;
}
if (trig_thresh->trip_triggered < 0)
goto set_and_exit;
mutex_lock(&ocr_mutex);
pr_debug("sensor%d reached %d thresh for Optimum current request\n",
tsens_id_map[trig_thresh->sensor_id],
trig_thresh->trip_triggered);
switch (trig_thresh->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_HI:
ocr_sens_status |= BIT(trig_thresh->sensor_id);
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
if (ocr_sens_status & BIT(trig_thresh->sensor_id))
ocr_sens_status ^= BIT(trig_thresh->sensor_id);
break;
default:
pr_err("Unsupported trip type\n");
goto unlock_and_exit;
break;
}
ret = ocr_set_mode_all(ocr_sens_status ? OPTIMUM_CURRENT_MAX :
OPTIMUM_CURRENT_MIN);
if (ret) {
pr_err("%s Optimum current mode for all failed. err:%d\n",
(ocr_sens_status) ?
"Enable" : "Disable", ret);
goto unlock_and_exit;
}
unlock_and_exit:
mutex_unlock(&ocr_mutex);
set_and_exit:
set_threshold(trig_thresh->sensor_id, trig_thresh->threshold);
return;
}
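/*
 * Thermal monitor kthread: wait for threshold completions and dispatch
 * the registered notify handler for every sensor whose trip triggered.
 */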
static __ref int do_thermal_monitor(void *data)
{
int ret = 0, i, j;
struct therm_threshold *sensor_list;
while (!kthread_should_stop()) {
while (wait_for_completion_interruptible(
&thermal_monitor_complete) != 0)
;
INIT_COMPLETION(thermal_monitor_complete);
for (i = 0; i < MSM_LIST_MAX_NR; i++) {
if (!thresh[i].thresh_triggered)
continue;
thresh[i].thresh_triggered = false;
for (j = 0; j < thresh[i].thresh_ct; j++) {
sensor_list = &thresh[i].thresh_list[j];
if (sensor_list->trip_triggered < 0)
continue;
sensor_list->notify(sensor_list);
sensor_list->trip_triggered = -1;
}
}
}
return ret;
}
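/* Convert TSENS sensor ids in a threshold list to thermal zone ids */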
static int convert_to_zone_id(struct threshold_info *thresh_inp)
{
int ret = 0, i, zone_id;
struct therm_threshold *thresh_array;
if (!thresh_inp) {
pr_err("Invalid input\n");
ret = -EINVAL;
goto convert_to_exit;
}
thresh_array = thresh_inp->thresh_list;
for (i = 0; i < thresh_inp->thresh_ct; i++) {
char tsens_name[TSENS_NAME_MAX] = "";
if (thresh_array[i].id_type == THERM_ZONE_ID)
continue;
snprintf(tsens_name, TSENS_NAME_MAX, TSENS_NAME_FORMAT,
thresh_array[i].sensor_id);
zone_id = sensor_get_id(tsens_name);
if (zone_id < 0) {
pr_err("Error getting zone id for %s. err:%d\n",
tsens_name, ret);
ret = zone_id;
goto convert_to_exit;
}
thresh_array[i].sensor_id = zone_id;
thresh_array[i].id_type = THERM_ZONE_ID;
}
convert_to_exit:
return ret;
}
static void thermal_monitor_init(void)
{
if (thermal_monitor_task)
return;
init_completion(&thermal_monitor_complete);
thermal_monitor_task = kthread_run(do_thermal_monitor, NULL,
"msm_thermal:therm_monitor");
if (IS_ERR(thermal_monitor_task)) {
pr_err("Failed to create thermal monitor thread. err:%ld\n",
PTR_ERR(thermal_monitor_task));
goto init_exit;
}
if (therm_reset_enabled &&
!(convert_to_zone_id(&thresh[MSM_THERM_RESET])))
therm_set_threshold(&thresh[MSM_THERM_RESET]);
if ((cx_phase_ctrl_enabled) &&
!(convert_to_zone_id(&thresh[MSM_CX_PHASE_CTRL_HOT])))
therm_set_threshold(&thresh[MSM_CX_PHASE_CTRL_HOT]);
if ((vdd_rstr_enabled) &&
!(convert_to_zone_id(&thresh[MSM_VDD_RESTRICTION])))
therm_set_threshold(&thresh[MSM_VDD_RESTRICTION]);
if ((gfx_warm_phase_ctrl_enabled) &&
!(convert_to_zone_id(&thresh[MSM_GFX_PHASE_CTRL_WARM]))) {
therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
}
if ((gfx_crit_phase_ctrl_enabled) &&
!(convert_to_zone_id(&thresh[MSM_GFX_PHASE_CTRL_HOT]))) {
therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
}
if ((ocr_enabled) &&
!(convert_to_zone_id(&thresh[MSM_OCR])))
therm_set_threshold(&thresh[MSM_OCR]);
if (vdd_mx_enabled &&
!(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION])))
therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]);
init_exit:
return;
}
static int msm_thermal_notify(enum thermal_trip_type type, int temp, void *data)
{
struct therm_threshold *thresh_data = (struct therm_threshold *)data;
if (thermal_monitor_task) {
thresh_data->trip_triggered = type;
thresh_data->parent->thresh_triggered = true;
complete(&thermal_monitor_complete);
} else {
pr_err("Thermal monitor task is not initialized\n");
}
return 0;
}
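/*
 * Allocate and populate the high/low trip thresholds for a mitigation
 * feature, either for a single sensor or for all TSENS sensors.
 */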
static int init_threshold(enum msm_thresh_list index,
int sensor_id, int32_t hi_temp, int32_t low_temp,
void (*callback)(struct therm_threshold *))
{
int ret = 0, i;
struct therm_threshold *thresh_ptr;
if (!callback || index >= MSM_LIST_MAX_NR || index < 0
|| sensor_id == -ENODEV) {
pr_err("Invalid input. sensor:%d. index:%d\n",
sensor_id, index);
ret = -EINVAL;
goto init_thresh_exit;
}
if (thresh[index].thresh_list) {
pr_info("threshold id:%d already initialized\n", index);
goto init_thresh_exit;
}
thresh[index].thresh_ct = (sensor_id == MONITOR_ALL_TSENS) ?
max_tsens_num : 1;
thresh[index].thresh_triggered = false;
thresh[index].thresh_list = kzalloc(sizeof(struct therm_threshold) *
thresh[index].thresh_ct, GFP_KERNEL);
if (!thresh[index].thresh_list) {
pr_err("kzalloc failed for thresh index:%d\n", index);
ret = -ENOMEM;
goto init_thresh_exit;
}
thresh_ptr = thresh[index].thresh_list;
if (sensor_id == MONITOR_ALL_TSENS) {
for (i = 0; i < max_tsens_num; i++) {
thresh_ptr[i].sensor_id = tsens_id_map[i];
thresh_ptr[i].id_type = THERM_TSENS_ID;
thresh_ptr[i].notify = callback;
thresh_ptr[i].trip_triggered = -1;
thresh_ptr[i].parent = &thresh[index];
thresh_ptr[i].threshold[0].temp = hi_temp;
thresh_ptr[i].threshold[0].trip =
THERMAL_TRIP_CONFIGURABLE_HI;
thresh_ptr[i].threshold[1].temp = low_temp;
thresh_ptr[i].threshold[1].trip =
THERMAL_TRIP_CONFIGURABLE_LOW;
thresh_ptr[i].threshold[0].notify =
thresh_ptr[i].threshold[1].notify = msm_thermal_notify;
thresh_ptr[i].threshold[0].data =
thresh_ptr[i].threshold[1].data =
(void *)&thresh_ptr[i];
}
} else {
thresh_ptr->sensor_id = sensor_id;
thresh_ptr->id_type = THERM_TSENS_ID;
thresh_ptr->notify = callback;
thresh_ptr->trip_triggered = -1;
thresh_ptr->parent = &thresh[index];
thresh_ptr->threshold[0].temp = hi_temp;
thresh_ptr->threshold[0].trip =
THERMAL_TRIP_CONFIGURABLE_HI;
thresh_ptr->threshold[1].temp = low_temp;
thresh_ptr->threshold[1].trip =
THERMAL_TRIP_CONFIGURABLE_LOW;
thresh_ptr->threshold[0].notify =
thresh_ptr->threshold[1].notify = msm_thermal_notify;
thresh_ptr->threshold[0].data =
thresh_ptr->threshold[1].data = (void *)thresh_ptr;
}
init_thresh_exit:
return ret;
}
static int msm_thermal_add_gfx_nodes(void)
{
struct kobject *module_kobj = NULL;
struct kobject *gfx_kobj = NULL;
int ret = 0;
if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
return -EINVAL;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("cannot find kobject\n");
ret = -ENOENT;
goto gfx_node_exit;
}
gfx_kobj = kobject_create_and_add("gfx_phase_ctrl", module_kobj);
if (!gfx_kobj) {
pr_err("cannot create gfx kobject\n");
ret = -ENOMEM;
goto gfx_node_exit;
}
gfx_attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 2, GFP_KERNEL);
if (!gfx_attr_gp.attrs) {
pr_err("kzalloc failed\n");
ret = -ENOMEM;
goto gfx_node_fail;
}
PHASE_RW_ATTR(gfx, temp_band, gfx_mode_attr, 0, gfx_attr_gp);
gfx_attr_gp.attrs[1] = NULL;
ret = sysfs_create_group(gfx_kobj, &gfx_attr_gp);
if (ret) {
pr_err("cannot create GFX attribute group. err:%d\n", ret);
goto gfx_node_fail;
}
gfx_node_fail:
if (ret) {
kobject_put(gfx_kobj);
kfree(gfx_attr_gp.attrs);
gfx_attr_gp.attrs = NULL;
}
gfx_node_exit:
return ret;
}
static int msm_thermal_add_cx_nodes(void)
{
struct kobject *module_kobj = NULL;
struct kobject *cx_kobj = NULL;
int ret = 0;
if (!cx_phase_ctrl_enabled)
return -EINVAL;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("cannot find kobject\n");
ret = -ENOENT;
goto cx_node_exit;
}
cx_kobj = kobject_create_and_add("cx_phase_ctrl", module_kobj);
if (!cx_kobj) {
pr_err("cannot create cx kobject\n");
ret = -ENOMEM;
goto cx_node_exit;
}
cx_attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 2, GFP_KERNEL);
if (!cx_attr_gp.attrs) {
pr_err("kzalloc failed\n");
ret = -ENOMEM;
goto cx_node_fail;
}
PHASE_RW_ATTR(cx, temp_band, cx_mode_attr, 0, cx_attr_gp);
cx_attr_gp.attrs[1] = NULL;
ret = sysfs_create_group(cx_kobj, &cx_attr_gp);
if (ret) {
pr_err("cannot create CX attribute group. err:%d\n", ret);
goto cx_node_fail;
}
cx_node_fail:
if (ret) {
kobject_put(cx_kobj);
kfree(cx_attr_gp.attrs);
cx_attr_gp.attrs = NULL;
}
cx_node_exit:
return ret;
}
/*
 * We will reset the CPU frequency limits here. The core online/offline
 * status will be carried over to the process stopping msm_thermal, as
 * we don't want to online a core and bring in thermal issues.
 */
static void __ref disable_msm_thermal(void)
{
uint32_t cpu = 0;
/* make sure check_temp is no longer running */
cancel_delayed_work_sync(&check_temp_work);
get_online_cpus();
for_each_possible_cpu(cpu) {
if (cpus[cpu].limited_max_freq == UINT_MAX &&
cpus[cpu].limited_min_freq == 0)
continue;
pr_info("Max frequency reset for CPU%d\n", cpu);
cpus[cpu].limited_max_freq = UINT_MAX;
cpus[cpu].limited_min_freq = 0;
if (!SYNC_CORE(cpu))
update_cpu_freq(cpu);
}
update_cluster_freq();
put_online_cpus();
}
static void interrupt_mode_init(void)
{
if (!msm_thermal_probed) {
interrupt_mode_enable = true;
return;
}
if (polling_enabled) {
pr_info("Interrupt mode init\n");
polling_enabled = 0;
disable_msm_thermal();
hotplug_init();
freq_mitigation_init();
thermal_monitor_init();
msm_thermal_add_cx_nodes();
msm_thermal_add_gfx_nodes();
}
}
static int __ref set_enabled(const char *val, const struct kernel_param *kp)
{
int ret = 0;
ret = param_set_bool(val, kp);
if (!enabled)
interrupt_mode_init();
else
pr_info("no action for enabled = %d\n",
enabled);
pr_info("enabled = %d\n", enabled);
return ret;
}
static struct kernel_param_ops module_ops = {
.set = set_enabled,
.get = param_get_bool,
};
module_param_cb(enabled, &module_ops, &enabled, 0644);
MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");
static ssize_t show_cc_enabled(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
}
static ssize_t __ref store_cc_enabled(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = 0;
int val = 0;
uint32_t cpu = 0;
ret = kstrtoint(buf, 10, &val);
if (ret) {
pr_err("Invalid input %s. err:%d\n", buf, ret);
goto done_store_cc;
}
if (core_control_enabled == !!val)
goto done_store_cc;
core_control_enabled = !!val;
if (core_control_enabled) {
pr_info("Core control enabled\n");
register_cpu_notifier(&msm_thermal_cpu_notifier);
/*
* Re-evaluate thermal core condition, update current status
* and set threshold for all cpus.
*/
hotplug_init_cpu_offlined();
mutex_lock(&core_control_mutex);
update_offline_cores(cpus_offlined);
if (hotplug_enabled) {
for_each_possible_cpu(cpu) {
if (!(msm_thermal_info.core_control_mask &
BIT(cpus[cpu].cpu)))
continue;
set_threshold(cpus[cpu].sensor_id,
&cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);
}
}
mutex_unlock(&core_control_mutex);
} else {
pr_info("Core control disabled\n");
unregister_cpu_notifier(&msm_thermal_cpu_notifier);
}
done_store_cc:
return count;
}
static ssize_t show_cpus_offlined(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", cpus_offlined);
}
static ssize_t __ref store_cpus_offlined(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = 0;
uint32_t val = 0;
uint32_t cpu;
mutex_lock(&core_control_mutex);
ret = kstrtouint(buf, 10, &val);
if (ret) {
pr_err("Invalid input %s. err:%d\n", buf, ret);
goto done_cc;
}
if (polling_enabled) {
pr_err("Ignoring request; polling thread is enabled.\n");
goto done_cc;
}
for_each_possible_cpu(cpu) {
if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
continue;
cpus[cpu].user_offline = !!(val & BIT(cpu));
pr_debug("\"%s\"(PID:%i) requests %s CPU%d.\n", current->comm,
current->pid, (cpus[cpu].user_offline) ? "offline" :
"online", cpu);
}
if (hotplug_task)
complete(&hotplug_notify_complete);
else
pr_err("Hotplug task is not initialized\n");
done_cc:
mutex_unlock(&core_control_mutex);
return count;
}
static __refdata struct kobj_attribute cc_enabled_attr =
__ATTR(enabled, 0644, show_cc_enabled, store_cc_enabled);
static __refdata struct kobj_attribute cpus_offlined_attr =
__ATTR(cpus_offlined, 0644, show_cpus_offlined, store_cpus_offlined);
static __refdata struct attribute *cc_attrs[] = {
&cc_enabled_attr.attr,
&cpus_offlined_attr.attr,
NULL,
};
static __refdata struct attribute_group cc_attr_group = {
.attrs = cc_attrs,
};
static __init int msm_thermal_add_cc_nodes(void)
{
struct kobject *module_kobj = NULL;
int ret = 0;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("cannot find kobject\n");
ret = -ENOENT;
goto done_cc_nodes;
}
cc_kobj = kobject_create_and_add("core_control", module_kobj);
if (!cc_kobj) {
pr_err("cannot create core control kobj\n");
ret = -ENOMEM;
goto done_cc_nodes;
}
ret = sysfs_create_group(cc_kobj, &cc_attr_group);
if (ret) {
pr_err("cannot create sysfs group. err:%d\n", ret);
goto done_cc_nodes;
}
return 0;
done_cc_nodes:
if (cc_kobj)
kobject_del(cc_kobj);
return ret;
}
static ssize_t show_mx_enabled(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", vdd_mx_enabled);
}
static ssize_t __ref store_mx_enabled(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = 0;
int val = 0;
ret = kstrtoint(buf, 10, &val);
if (ret) {
pr_err("Invalid input %s\n", buf);
goto done_store_mx;
}
if (vdd_mx_enabled == !!val)
goto done_store_mx;
vdd_mx_enabled = !!val;
mutex_lock(&vdd_mx_mutex);
if (!vdd_mx_enabled)
remove_vdd_mx_restriction();
else if (!(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION])))
therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]);
mutex_unlock(&vdd_mx_mutex);
done_store_mx:
return count;
}
static __init int msm_thermal_add_mx_nodes(void)
{
struct kobject *module_kobj = NULL;
int ret = 0;
if (!vdd_mx_enabled)
return -EINVAL;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("cannot find kobject for module\n");
ret = -ENOENT;
goto done_mx_nodes;
}
mx_kobj = kobject_create_and_add("vdd_mx", module_kobj);
if (!mx_kobj) {
pr_err("cannot create mx restriction kobj\n");
ret = -ENOMEM;
goto done_mx_nodes;
}
mx_attr_group.attrs = kzalloc(sizeof(struct attribute *) * 2,
GFP_KERNEL);
if (!mx_attr_group.attrs) {
ret = -ENOMEM;
pr_err("cannot allocate memory for mx_attr_group.attrs");
goto done_mx_nodes;
}
MX_RW_ATTR(mx_enabled_attr, enabled, mx_attr_group);
mx_attr_group.attrs[1] = NULL;
ret = sysfs_create_group(mx_kobj, &mx_attr_group);
if (ret) {
pr_err("cannot create group\n");
goto done_mx_nodes;
}
done_mx_nodes:
if (ret) {
if (mx_kobj)
kobject_del(mx_kobj);
kfree(mx_attr_group.attrs);
}
return ret;
}
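/*
 * Allocate storage for per-sensor temperatures captured at panic time
 * and register the panic notifier that fills it in.
 */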
static void msm_thermal_panic_notifier_init(struct device *dev)
{
int i;
tsens_temp_at_panic = devm_kzalloc(dev,
sizeof(long) * max_tsens_num,
GFP_KERNEL);
if (!tsens_temp_at_panic) {
pr_err("kzalloc failed\n");
return;
}
for (i = 0; i < max_tsens_num; i++)
tsens_temp_at_panic[i] = LONG_MIN;
atomic_notifier_chain_register(&panic_notifier_list,
&msm_thermal_panic_notifier);
}
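/*
 * Early initialization: verify TSENS is ready, build the sensor id map,
 * register the panic notifier and allocate the threshold bookkeeping.
 * Returns -EPROBE_DEFER if the TSENS driver is not up yet.
 */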
int msm_thermal_pre_init(struct device *dev)
{
int ret = 0;
if (tsens_is_ready() <= 0) {
pr_err("Tsens driver is not ready yet\n");
return -EPROBE_DEFER;
}
ret = tsens_get_max_sensor_num(&max_tsens_num);
if (ret < 0) {
pr_err("failed to get max sensor number, err:%d\n", ret);
return ret;
}
if (create_sensor_id_map()) {
pr_err("Creating sensor id map failed\n");
ret = -EINVAL;
goto pre_init_exit;
}
if (!tsens_temp_at_panic)
msm_thermal_panic_notifier_init(dev);
if (!thresh) {
thresh = kzalloc(
sizeof(struct threshold_info) * MSM_LIST_MAX_NR,
GFP_KERNEL);
if (!thresh) {
pr_err("kzalloc failed\n");
ret = -ENOMEM;
goto pre_init_exit;
}
memset(thresh, 0, sizeof(struct threshold_info) *
MSM_LIST_MAX_NR);
}
pre_init_exit:
return ret;
}
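/*
 * Register the mitigation device managers: one hotplug device (on SMP
 * systems) plus one cpufreq device per possible CPU, each with its own
 * client list, lock and request validation/update callbacks.
 */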
static int devmgr_devices_init(struct platform_device *pdev)
{
int ret = 0;
uint32_t cpu;
struct device_manager_data *dev_mgr = NULL;
devices = devm_kzalloc(&pdev->dev,
sizeof(struct devmgr_devices),
GFP_KERNEL);
if (!devices) {
pr_err("Malloc failed for devmgr devices\n");
ret = -ENOMEM;
goto device_exit;
}
if (num_possible_cpus() > 1) {
/* Add hotplug device */
dev_mgr = devm_kzalloc(&pdev->dev,
sizeof(struct device_manager_data),
GFP_KERNEL);
if (!dev_mgr) {
pr_err("Malloc failed for hotplug device\n");
ret = -ENOMEM;
goto device_exit;
}
snprintf(dev_mgr->device_name,
TSENS_NAME_MAX, HOTPLUG_DEVICE);
dev_mgr->request_validate =
devmgr_hotplug_client_request_validate_and_update;
dev_mgr->update = devmgr_client_hotplug_update;
HOTPLUG_NO_MITIGATION(&dev_mgr->active_req.offline_mask);
mutex_init(&dev_mgr->clnt_lock);
INIT_LIST_HEAD(&dev_mgr->client_list);
list_add_tail(&dev_mgr->dev_ptr, &devices_list);
devices->hotplug_dev = dev_mgr;
}
/* Add cpu devices */
for_each_possible_cpu(cpu) {
dev_mgr = devm_kzalloc(&pdev->dev,
sizeof(struct device_manager_data),
GFP_KERNEL);
if (!dev_mgr) {
pr_err("Malloc failed for cpu%d device\n", cpu);
ret = -ENOMEM;
goto device_exit;
}
snprintf(dev_mgr->device_name, TSENS_NAME_MAX, CPU_DEVICE, cpu);
dev_mgr->request_validate =
devmgr_cpufreq_client_request_validate_and_update;
dev_mgr->update = devmgr_client_cpufreq_update;
dev_mgr->active_req.freq.max_freq = CPUFREQ_MAX_NO_MITIGATION;
dev_mgr->active_req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
mutex_init(&dev_mgr->clnt_lock);
INIT_LIST_HEAD(&dev_mgr->client_list);
list_add_tail(&dev_mgr->dev_ptr, &devices_list);
devices->cpufreq_dev[cpu] = dev_mgr;
}
device_exit:
if (ret) {
if (devices) {
if (devices->hotplug_dev)
devm_kfree(&pdev->dev,
devices->hotplug_dev);
for_each_possible_cpu(cpu) {
if (devices->cpufreq_dev[cpu])
devm_kfree(&pdev->dev,
devices->cpufreq_dev[cpu]);
}
}
}
return ret;
}
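/*
 * Main init: set per-CPU defaults, copy the platform data, register the
 * cpufreq policy notifier, schedule the polling work and, on SMP
 * systems, register the CPU hotplug notifier.
 */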
int msm_thermal_init(struct msm_thermal_data *pdata)
{
int ret = 0;
uint32_t cpu;
ret = devmgr_devices_init(pdata->pdev);
if (ret)
pr_err("cannot initialize devm devices. err:%d\n", ret);
for_each_possible_cpu(cpu) {
cpus[cpu].cpu = cpu;
cpus[cpu].offline = 0;
cpus[cpu].user_offline = 0;
cpus[cpu].hotplug_thresh_clear = false;
cpus[cpu].max_freq = false;
cpus[cpu].user_max_freq = UINT_MAX;
cpus[cpu].user_min_freq = 0;
cpus[cpu].limited_max_freq = UINT_MAX;
cpus[cpu].limited_min_freq = 0;
cpus[cpu].freq_thresh_clear = false;
}
BUG_ON(!pdata);
memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
if (check_sensor_id(msm_thermal_info.sensor_id)) {
pr_err("Invalid sensor:%d for polling\n",
msm_thermal_info.sensor_id);
return -EINVAL;
}
enabled = 1;
polling_enabled = 1;
ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
pr_err("cannot register cpufreq notifier. err:%d\n", ret);
INIT_DELAYED_WORK(&check_temp_work, check_temp);
schedule_delayed_work(&check_temp_work, 0);
if (num_possible_cpus() > 1)
register_cpu_notifier(&msm_thermal_cpu_notifier);
return ret;
}
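/*
 * Get regulator handles for the optimum current request (OCR) rails,
 * reusing any handle already obtained by vdd restriction for a rail of
 * the same name.
 */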
static int ocr_reg_init(struct platform_device *pdev)
{
int ret = 0;
int i, j;
for (i = 0; i < ocr_rail_cnt; i++) {
		/* Check if vdd_restriction has already initialized any
		 * regulator handle. If so, use the same handle. */
for (j = 0; j < rails_cnt; j++) {
if (!strcmp(ocr_rails[i].name, rails[j].name)) {
if (rails[j].reg == NULL)
break;
ocr_rails[i].phase_reg = rails[j].reg;
goto reg_init;
}
}
ocr_rails[i].phase_reg = devm_regulator_get(&pdev->dev,
ocr_rails[i].name);
if (IS_ERR_OR_NULL(ocr_rails[i].phase_reg)) {
ret = PTR_ERR(ocr_rails[i].phase_reg);
if (ret != -EPROBE_DEFER) {
pr_err("Could not get regulator: %s, err:%d\n",
ocr_rails[i].name, ret);
ocr_rails[i].phase_reg = NULL;
ocr_rails[i].mode = 0;
ocr_rails[i].init = 0;
}
return ret;
}
reg_init:
ocr_rails[i].mode = OPTIMUM_CURRENT_MIN;
}
return ret;
}
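/*
 * Initialize the vdd restriction rails: frequency-based rails are
 * restricted through cpufreq (once the frequency table is available),
 * the others get a regulator handle and a default voltage restriction.
 */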
static int vdd_restriction_reg_init(struct platform_device *pdev)
{
int ret = 0;
int i;
for (i = 0; i < rails_cnt; i++) {
if (rails[i].freq_req == 1) {
usefreq |= BIT(i);
check_freq_table();
/*
* Restrict frequency by default until we have made
* our first temp reading
*/
if (freq_table_get)
ret = vdd_restriction_apply_freq(&rails[i], 0);
else
pr_info("Defer vdd rstr freq init.\n");
} else {
rails[i].reg = devm_regulator_get(&pdev->dev,
rails[i].name);
if (IS_ERR_OR_NULL(rails[i].reg)) {
ret = PTR_ERR(rails[i].reg);
if (ret != -EPROBE_DEFER) {
				pr_err(
"could not get regulator: %s. err:%d\n",
rails[i].name, ret);
rails[i].reg = NULL;
rails[i].curr_level = -2;
return ret;
}
pr_info("Defer regulator %s probe\n",
rails[i].name);
return ret;
}
/*
			 * Restrict voltage by default until we have made
* our first temp reading
*/
ret = vdd_restriction_apply_voltage(&rails[i], 0);
}
}
return ret;
}
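/*
 * Get RPM regulator handles for the PMIC software mode (PSM) rails and
 * vote for PWM mode by default; on failure, release the handles that
 * were already acquired.
 */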
static int psm_reg_init(struct platform_device *pdev)
{
int ret = 0;
int i = 0;
int j = 0;
for (i = 0; i < psm_rails_cnt; i++) {
psm_rails[i].reg = rpm_regulator_get(&pdev->dev,
psm_rails[i].name);
if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
ret = PTR_ERR(psm_rails[i].reg);
if (ret != -EPROBE_DEFER) {
pr_err("couldn't get rpm regulator %s. err%d\n",
psm_rails[i].name, ret);
psm_rails[i].reg = NULL;
goto psm_reg_exit;
}
pr_info("Defer regulator %s probe\n",
psm_rails[i].name);
return ret;
}
/* Apps default vote for PWM mode */
psm_rails[i].init = PMIC_PWM_MODE;
ret = rpm_regulator_set_mode(psm_rails[i].reg,
psm_rails[i].init);
if (ret) {
pr_err("Cannot set PMIC PWM mode. err:%d\n", ret);
return ret;
} else
psm_rails[i].mode = PMIC_PWM_MODE;
}
return ret;
psm_reg_exit:
if (ret) {
for (j = 0; j < i; j++) {
if (psm_rails[j].reg != NULL)
rpm_regulator_put(psm_rails[j].reg);
}
}
return ret;
}
static struct kobj_attribute sensor_info_attr =
__ATTR_RO(sensor_info);
static int msm_thermal_add_sensor_info_nodes(void)
{
struct kobject *module_kobj = NULL;
int ret = 0;
if (!sensor_info_probed) {
sensor_info_nodes_called = true;
return ret;
}
if (sensor_info_probed && sensor_cnt == 0)
return ret;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("cannot find kobject\n");
return -ENOENT;
}
sysfs_attr_init(&sensor_info_attr.attr);
ret = sysfs_create_file(module_kobj, &sensor_info_attr.attr);
if (ret) {
pr_err(
"cannot create sensor info kobject attribute. err:%d\n",
ret);
return ret;
}
return ret;
}
static int msm_thermal_add_vdd_rstr_nodes(void)
{
struct kobject *module_kobj = NULL;
struct kobject *vdd_rstr_kobj = NULL;
struct kobject *vdd_rstr_reg_kobj[MAX_RAILS] = {0};
int rc = 0;
int i = 0;
if (!vdd_rstr_probed) {
vdd_rstr_nodes_called = true;
return rc;
}
if (vdd_rstr_probed && rails_cnt == 0)
return rc;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("cannot find kobject\n");
rc = -ENOENT;
goto thermal_sysfs_add_exit;
}
vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
if (!vdd_rstr_kobj) {
pr_err("cannot create vdd_restriction kobject\n");
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
if (rc) {
pr_err("cannot create kobject attribute group. err:%d\n", rc);
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
for (i = 0; i < rails_cnt; i++) {
vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
vdd_rstr_kobj);
if (!vdd_rstr_reg_kobj[i]) {
pr_err("cannot create kobject for %s\n",
rails[i].name);
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
GFP_KERNEL);
if (!rails[i].attr_gp.attrs) {
pr_err("kzalloc failed\n");
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
VDD_RES_RW_ATTRIB(rails[i], rails[i].level_attr, 0, level);
VDD_RES_RO_ATTRIB(rails[i], rails[i].value_attr, 1, value);
rails[i].attr_gp.attrs[2] = NULL;
rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
&rails[i].attr_gp);
if (rc) {
pr_err("cannot create attribute group for %s. err:%d\n",
rails[i].name, rc);
goto thermal_sysfs_add_exit;
}
}
return rc;
thermal_sysfs_add_exit:
if (rc) {
for (i = 0; i < rails_cnt; i++) {
kobject_del(vdd_rstr_reg_kobj[i]);
kfree(rails[i].attr_gp.attrs);
}
if (vdd_rstr_kobj)
kobject_del(vdd_rstr_kobj);
}
return rc;
}
static int msm_thermal_add_ocr_nodes(void)
{
struct kobject *module_kobj = NULL;
struct kobject *ocr_kobj = NULL;
struct kobject *ocr_reg_kobj[MAX_RAILS] = {0};
int rc = 0;
int i = 0;
if (!ocr_probed) {
ocr_nodes_called = true;
return rc;
}
if (ocr_probed && ocr_rail_cnt == 0)
return rc;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("Cannot find kobject\n");
rc = -ENOENT;
goto ocr_node_exit;
}
ocr_kobj = kobject_create_and_add("opt_curr_req", module_kobj);
if (!ocr_kobj) {
pr_err("Cannot create ocr kobject\n");
rc = -ENOMEM;
goto ocr_node_exit;
}
for (i = 0; i < ocr_rail_cnt; i++) {
ocr_reg_kobj[i] = kobject_create_and_add(ocr_rails[i].name,
ocr_kobj);
if (!ocr_reg_kobj[i]) {
pr_err("Cannot create kobject for %s\n",
ocr_rails[i].name);
rc = -ENOMEM;
goto ocr_node_exit;
}
ocr_rails[i].attr_gp.attrs = kzalloc(
sizeof(struct attribute *) * 2, GFP_KERNEL);
if (!ocr_rails[i].attr_gp.attrs) {
pr_err("Fail to allocate memory for attribute for %s\n",
ocr_rails[i].name);
rc = -ENOMEM;
goto ocr_node_exit;
}
OCR_RW_ATTRIB(ocr_rails[i], ocr_rails[i].mode_attr, 0, mode);
ocr_rails[i].attr_gp.attrs[1] = NULL;
rc = sysfs_create_group(ocr_reg_kobj[i], &ocr_rails[i].attr_gp);
if (rc) {
pr_err("Cannot create attribute group for %s. err:%d\n",
ocr_rails[i].name, rc);
goto ocr_node_exit;
}
}
ocr_node_exit:
if (rc) {
for (i = 0; i < ocr_rail_cnt; i++) {
if (ocr_reg_kobj[i])
kobject_del(ocr_reg_kobj[i]);
kfree(ocr_rails[i].attr_gp.attrs);
ocr_rails[i].attr_gp.attrs = NULL;
}
if (ocr_kobj)
kobject_del(ocr_kobj);
}
return rc;
}
static int msm_thermal_add_psm_nodes(void)
{
struct kobject *module_kobj = NULL;
struct kobject *psm_kobj = NULL;
struct kobject *psm_reg_kobj[MAX_RAILS] = {0};
int rc = 0;
int i = 0;
if (!psm_probed) {
psm_nodes_called = true;
return rc;
}
if (psm_probed && psm_rails_cnt == 0)
return rc;
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
pr_err("cannot find kobject\n");
rc = -ENOENT;
goto psm_node_exit;
}
psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
if (!psm_kobj) {
pr_err("cannot create psm kobject\n");
rc = -ENOMEM;
goto psm_node_exit;
}
for (i = 0; i < psm_rails_cnt; i++) {
psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
psm_kobj);
if (!psm_reg_kobj[i]) {
pr_err("cannot create kobject for %s\n",
psm_rails[i].name);
rc = -ENOMEM;
goto psm_node_exit;
}
		psm_rails[i].attr_gp.attrs = kzalloc(
sizeof(struct attribute *) * 2, GFP_KERNEL);
if (!psm_rails[i].attr_gp.attrs) {
pr_err("kzalloc failed\n");
rc = -ENOMEM;
goto psm_node_exit;
}
PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode);
psm_rails[i].attr_gp.attrs[1] = NULL;
rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
if (rc) {
pr_err("cannot create attribute group for %s. err:%d\n",
psm_rails[i].name, rc);
goto psm_node_exit;
}
}
return rc;
psm_node_exit:
if (rc) {
for (i = 0; i < psm_rails_cnt; i++) {
kobject_del(psm_reg_kobj[i]);
kfree(psm_rails[i].attr_gp.attrs);
}
if (psm_kobj)
kobject_del(psm_kobj);
}
return rc;
}
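/*
 * Parse the vdd-mx device tree properties, get the vdd-mx regulator and
 * set up the MX retention restriction threshold monitored across all
 * TSENS sensors.
 */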
static int probe_vdd_mx(struct device_node *node,
struct msm_thermal_data *data, struct platform_device *pdev)
{
int ret = 0;
char *key = NULL;
key = "qcom,disable-vdd-mx";
if (of_property_read_bool(node, key)) {
vdd_mx_enabled = false;
return ret;
}
key = "qcom,mx-restriction-temp";
ret = of_property_read_u32(node, key, &data->vdd_mx_temp_degC);
if (ret)
goto read_node_done;
key = "qcom,mx-restriction-temp-hysteresis";
ret = of_property_read_u32(node, key, &data->vdd_mx_temp_hyst_degC);
if (ret)
goto read_node_done;
key = "qcom,mx-retention-min";
ret = of_property_read_u32(node, key, &data->vdd_mx_min);
if (ret)
goto read_node_done;
vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
if (IS_ERR_OR_NULL(vdd_mx)) {
ret = PTR_ERR(vdd_mx);
if (ret != -EPROBE_DEFER) {
pr_err(
"Could not get regulator: vdd-mx, err:%d\n", ret);
}
goto read_node_done;
}
ret = init_threshold(MSM_VDD_MX_RESTRICTION, MONITOR_ALL_TSENS,
data->vdd_mx_temp_degC + data->vdd_mx_temp_hyst_degC,
data->vdd_mx_temp_degC, vdd_mx_notify);
read_node_done:
if (!ret)
vdd_mx_enabled = true;
else if (ret != -EPROBE_DEFER)
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. KTM continues\n",
__func__, node->full_name, key);
return ret;
}
static int probe_vdd_rstr(struct device_node *node,
struct msm_thermal_data *data, struct platform_device *pdev)
{
int ret = 0;
int i = 0;
int arr_size;
char *key = NULL;
struct device_node *child_node = NULL;
rails = NULL;
key = "qcom,disable-vdd-rstr";
if (of_property_read_bool(node, key)) {
vdd_rstr_probed = true;
vdd_rstr_enabled = false;
rails_cnt = 0;
return ret;
}
key = "qcom,vdd-restriction-temp";
ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_degC);
if (ret)
goto read_node_fail;
key = "qcom,vdd-restriction-temp-hysteresis";
ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_hyst_degC);
if (ret)
goto read_node_fail;
for_each_child_of_node(node, child_node) {
rails_cnt++;
}
if (rails_cnt == 0)
goto read_node_fail;
if (rails_cnt >= MAX_RAILS) {
pr_err("Too many rails:%d.\n", rails_cnt);
return -EFAULT;
}
rails = kzalloc(sizeof(struct rail) * rails_cnt,
GFP_KERNEL);
if (!rails) {
pr_err("Fail to allocate memory for rails.\n");
return -ENOMEM;
}
i = 0;
for_each_child_of_node(node, child_node) {
key = "qcom,vdd-rstr-reg";
ret = of_property_read_string(child_node, key, &rails[i].name);
if (ret)
goto read_node_fail;
key = "qcom,levels";
if (!of_get_property(child_node, key, &arr_size))
goto read_node_fail;
rails[i].num_levels = arr_size/sizeof(__be32);
if (rails[i].num_levels >
sizeof(rails[i].levels)/sizeof(uint32_t)) {
pr_err("Array size:%d too large for index:%d\n",
rails[i].num_levels, i);
return -EFAULT;
}
ret = of_property_read_u32_array(child_node, key,
rails[i].levels, rails[i].num_levels);
if (ret)
goto read_node_fail;
key = "qcom,freq-req";
rails[i].freq_req = of_property_read_bool(child_node, key);
if (rails[i].freq_req)
rails[i].min_level = 0;
else {
key = "qcom,min-level";
ret = of_property_read_u32(child_node, key,
&rails[i].min_level);
if (ret)
goto read_node_fail;
}
rails[i].curr_level = -1;
rails[i].reg = NULL;
i++;
}
if (rails_cnt) {
ret = vdd_restriction_reg_init(pdev);
if (ret) {
pr_err("Err regulator init. err:%d. KTM continues.\n",
ret);
goto read_node_fail;
}
ret = init_threshold(MSM_VDD_RESTRICTION, MONITOR_ALL_TSENS,
data->vdd_rstr_temp_hyst_degC, data->vdd_rstr_temp_degC,
vdd_restriction_notify);
if (ret) {
pr_err("Error in initializing thresholds. err:%d\n",
ret);
goto read_node_fail;
}
vdd_rstr_enabled = true;
}
read_node_fail:
vdd_rstr_probed = true;
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
__func__, node->full_name, key, ret);
kfree(rails);
rails_cnt = 0;
}
if (ret == -EPROBE_DEFER)
vdd_rstr_probed = false;
return ret;
}
static void probe_sensor_info(struct device_node *node,
struct msm_thermal_data *data, struct platform_device *pdev)
{
int err = 0;
int i = 0;
char *key = NULL;
struct device_node *child_node = NULL;
struct device_node *np = NULL;
key = "qcom,disable-sensor-info";
if (of_property_read_bool(node, key)) {
sensor_info_probed = true;
return;
}
np = of_find_compatible_node(NULL, NULL, "qcom,sensor-information");
if (!np) {
dev_info(&pdev->dev,
"%s:unable to find DT for sensor-information.KTM continues\n",
__func__);
sensor_info_probed = true;
return;
}
sensor_cnt = of_get_child_count(np);
if (sensor_cnt == 0) {
err = -ENODEV;
goto read_node_fail;
}
sensors = devm_kzalloc(&pdev->dev,
sizeof(struct msm_sensor_info) * sensor_cnt,
GFP_KERNEL);
if (!sensors) {
pr_err("Fail to allocate memory for sensor_info.\n");
err = -ENOMEM;
goto read_node_fail;
}
for_each_child_of_node(np, child_node) {
key = "qcom,sensor-type";
err = of_property_read_string(child_node,
key, &sensors[i].type);
if (err)
goto read_node_fail;
key = "qcom,sensor-name";
err = of_property_read_string(child_node,
key, &sensors[i].name);
if (err)
goto read_node_fail;
key = "qcom,alias-name";
of_property_read_string(child_node,
key, &sensors[i].alias);
key = "qcom,scaling-factor";
err = of_property_read_u32(child_node,
key, &sensors[i].scaling_factor);
if (err) {
sensors[i].scaling_factor = SENSOR_SCALING_FACTOR;
err = 0;
}
i++;
}
read_node_fail:
sensor_info_probed = true;
if (err) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
__func__, np->full_name, key, err);
devm_kfree(&pdev->dev, sensors);
}
}
static int probe_ocr(struct device_node *node, struct msm_thermal_data *data,
struct platform_device *pdev)
{
int ret = 0;
int j = 0;
char *key = NULL;
if (ocr_probed) {
pr_info("Nodes already probed\n");
goto read_ocr_exit;
}
ocr_rails = NULL;
key = "qcom,disable-ocr";
if (of_property_read_bool(node, key)) {
ocr_probed = true;
ocr_enabled = false;
ocr_rail_cnt = 0;
goto read_ocr_exit;
}
key = "qcom,pmic-opt-curr-temp";
ret = of_property_read_u32(node, key, &data->ocr_temp_degC);
if (ret)
goto read_ocr_fail;
key = "qcom,pmic-opt-curr-temp-hysteresis";
ret = of_property_read_u32(node, key, &data->ocr_temp_hyst_degC);
if (ret)
goto read_ocr_fail;
key = "qcom,pmic-opt-curr-regs";
ocr_rail_cnt = of_property_count_strings(node, key);
if (ocr_rail_cnt <= 0) {
pr_err("Invalid ocr rail count. err:%d\n", ocr_rail_cnt);
goto read_ocr_fail;
}
ocr_rails = kzalloc(sizeof(struct psm_rail) * ocr_rail_cnt,
GFP_KERNEL);
if (!ocr_rails) {
pr_err("Fail to allocate memory for ocr rails\n");
ocr_rail_cnt = 0;
return -ENOMEM;
}
for (j = 0; j < ocr_rail_cnt; j++) {
ret = of_property_read_string_index(node, key, j,
&ocr_rails[j].name);
if (ret)
goto read_ocr_fail;
ocr_rails[j].phase_reg = NULL;
ocr_rails[j].init = OPTIMUM_CURRENT_MAX;
}
key = "qcom,pmic-opt-curr-sensor-id";
ret = of_property_read_u32(node, key, &data->ocr_sensor_id);
if (ret) {
pr_info("ocr sensor is not configured, use all TSENS. err:%d\n",
ret);
data->ocr_sensor_id = MONITOR_ALL_TSENS;
}
ret = ocr_reg_init(pdev);
if (ret) {
if (ret == -EPROBE_DEFER) {
ocr_reg_init_defer = true;
pr_info("ocr reg init is defered\n");
} else {
pr_err(
"Failed to get regulators. KTM continues. err:%d\n",
ret);
goto read_ocr_fail;
}
}
ret = init_threshold(MSM_OCR, data->ocr_sensor_id,
data->ocr_temp_degC,
data->ocr_temp_degC - data->ocr_temp_hyst_degC,
ocr_notify);
if (ret)
goto read_ocr_fail;
if (!ocr_reg_init_defer)
ocr_enabled = true;
ocr_nodes_called = false;
/*
* Vote for max optimum current by default until we have made
* our first temp reading
*/
if (ocr_enabled) {
ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
if (ret) {
pr_err("Set max optimum current failed. err:%d\n",
ret);
ocr_enabled = false;
}
}
read_ocr_fail:
ocr_probed = true;
if (ret) {
if (ret == -EPROBE_DEFER) {
ret = 0;
goto read_ocr_exit;
}
dev_err(
&pdev->dev,
"%s:Failed reading node=%s, key=%s err:%d. KTM continues\n",
__func__, node->full_name, key, ret);
kfree(ocr_rails);
ocr_rails = NULL;
ocr_rail_cnt = 0;
}
read_ocr_exit:
return ret;
}
static int probe_psm(struct device_node *node, struct msm_thermal_data *data,
struct platform_device *pdev)
{
int ret = 0;
int j = 0;
char *key = NULL;
psm_rails = NULL;
key = "qcom,disable-psm";
if (of_property_read_bool(node, key)) {
psm_probed = true;
psm_enabled = false;
psm_rails_cnt = 0;
return ret;
}
key = "qcom,pmic-sw-mode-temp";
ret = of_property_read_u32(node, key, &data->psm_temp_degC);
if (ret)
goto read_node_fail;
key = "qcom,pmic-sw-mode-temp-hysteresis";
ret = of_property_read_u32(node, key, &data->psm_temp_hyst_degC);
if (ret)
goto read_node_fail;
key = "qcom,pmic-sw-mode-regs";
psm_rails_cnt = of_property_count_strings(node, key);
psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
GFP_KERNEL);
if (!psm_rails) {
pr_err("Fail to allocate memory for psm rails\n");
psm_rails_cnt = 0;
return -ENOMEM;
}
for (j = 0; j < psm_rails_cnt; j++) {
ret = of_property_read_string_index(node, key, j,
&psm_rails[j].name);
if (ret)
goto read_node_fail;
}
if (psm_rails_cnt) {
ret = psm_reg_init(pdev);
if (ret) {
pr_err("Err regulator init. err:%d. KTM continues.\n",
ret);
goto read_node_fail;
}
psm_enabled = true;
}
read_node_fail:
psm_probed = true;
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
__func__, node->full_name, key, ret);
kfree(psm_rails);
psm_rails_cnt = 0;
}
if (ret == -EPROBE_DEFER)
psm_probed = false;
return ret;
}
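/*
 * Parse the core control and hotplug mitigation device tree properties
 * (trip temperatures, hysteresis, control mask and per-CPU sensors).
 * Core control or hotplug is disabled if its properties are missing.
 */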
static int probe_cc(struct device_node *node, struct msm_thermal_data *data,
struct platform_device *pdev)
{
char *key = NULL;
uint32_t cpu_cnt = 0;
int ret = 0;
uint32_t cpu = 0;
if (num_possible_cpus() > 1) {
core_control_enabled = 1;
hotplug_enabled = 1;
}
key = "qcom,core-limit-temp";
ret = of_property_read_u32(node, key, &data->core_limit_temp_degC);
if (ret)
goto read_node_fail;
key = "qcom,core-temp-hysteresis";
ret = of_property_read_u32(node, key, &data->core_temp_hysteresis_degC);
if (ret)
goto read_node_fail;
key = "qcom,core-control-mask";
ret = of_property_read_u32(node, key, &data->core_control_mask);
if (ret)
goto read_node_fail;
key = "qcom,hotplug-temp";
ret = of_property_read_u32(node, key, &data->hotplug_temp_degC);
if (ret)
goto hotplug_node_fail;
key = "qcom,hotplug-temp-hysteresis";
ret = of_property_read_u32(node, key,
&data->hotplug_temp_hysteresis_degC);
if (ret)
goto hotplug_node_fail;
key = "qcom,cpu-sensors";
cpu_cnt = of_property_count_strings(node, key);
if (cpu_cnt < num_possible_cpus()) {
pr_err("Wrong number of cpu sensors:%d\n", cpu_cnt);
ret = -EINVAL;
goto hotplug_node_fail;
}
for_each_possible_cpu(cpu) {
ret = of_property_read_string_index(node, key, cpu,
&cpus[cpu].sensor_type);
if (ret)
goto hotplug_node_fail;
}
read_node_fail:
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
KBUILD_MODNAME, node->full_name, key, ret);
core_control_enabled = 0;
}
return ret;
hotplug_node_fail:
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
KBUILD_MODNAME, node->full_name, key, ret);
hotplug_enabled = 0;
}
return ret;
}
static int probe_gfx_phase_ctrl(struct device_node *node,
struct msm_thermal_data *data,
struct platform_device *pdev)
{
char *key = NULL;
const char *tmp_str = NULL;
int ret = 0;
key = "qcom,disable-gfx-phase-ctrl";
if (of_property_read_bool(node, key)) {
gfx_crit_phase_ctrl_enabled = false;
gfx_warm_phase_ctrl_enabled = false;
return ret;
}
key = "qcom,gfx-sensor-id";
ret = of_property_read_u32(node, key,
&data->gfx_sensor);
if (ret)
goto probe_gfx_exit;
key = "qcom,gfx-phase-resource-key";
ret = of_property_read_string(node, key,
&tmp_str);
if (ret)
goto probe_gfx_exit;
data->gfx_phase_request_key = msm_thermal_str_to_int(tmp_str);
key = "qcom,gfx-phase-warm-temp";
ret = of_property_read_u32(node, key,
&data->gfx_phase_warm_temp_degC);
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
KBUILD_MODNAME, node->full_name, key, ret);
data->gfx_phase_warm_temp_degC = INT_MIN;
goto probe_gfx_crit;
}
key = "qcom,gfx-phase-warm-temp-hyst";
ret = of_property_read_u32(node, key,
&data->gfx_phase_warm_temp_hyst_degC);
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
KBUILD_MODNAME, node->full_name, key, ret);
goto probe_gfx_crit;
}
ret = init_threshold(MSM_GFX_PHASE_CTRL_WARM, data->gfx_sensor,
data->gfx_phase_warm_temp_degC, data->gfx_phase_warm_temp_degC -
data->gfx_phase_warm_temp_hyst_degC,
gfx_phase_ctrl_notify);
if (ret) {
pr_err("init WARM threshold failed. err:%d\n", ret);
goto probe_gfx_crit;
}
gfx_warm_phase_ctrl_enabled = true;
probe_gfx_crit:
key = "qcom,gfx-phase-hot-crit-temp";
ret = of_property_read_u32(node, key,
&data->gfx_phase_hot_temp_degC);
if (ret) {
data->gfx_phase_hot_temp_degC = INT_MAX;
goto probe_gfx_exit;
}
key = "qcom,gfx-phase-hot-crit-temp-hyst";
ret = of_property_read_u32(node, key,
&data->gfx_phase_hot_temp_hyst_degC);
if (ret)
goto probe_gfx_exit;
ret = init_threshold(MSM_GFX_PHASE_CTRL_HOT, data->gfx_sensor,
data->gfx_phase_hot_temp_degC, data->gfx_phase_hot_temp_degC -
data->gfx_phase_hot_temp_hyst_degC,
gfx_phase_ctrl_notify);
if (ret) {
pr_err("init HOT threshold failed. err:%d\n", ret);
goto probe_gfx_exit;
}
gfx_crit_phase_ctrl_enabled = true;
probe_gfx_exit:
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
KBUILD_MODNAME, node->full_name, key, ret);
}
return ret;
}
static int probe_cx_phase_ctrl(struct device_node *node,
struct msm_thermal_data *data,
struct platform_device *pdev)
{
char *key = NULL;
const char *tmp_str;
int ret = 0;
key = "qcom,disable-cx-phase-ctrl";
if (of_property_read_bool(node, key)) {
cx_phase_ctrl_enabled = false;
return ret;
}
key = "qcom,rpm-phase-resource-type";
ret = of_property_read_string(node, key,
&tmp_str);
if (ret)
goto probe_cx_exit;
data->phase_rpm_resource_type = msm_thermal_str_to_int(tmp_str);
key = "qcom,rpm-phase-resource-id";
ret = of_property_read_u32(node, key,
&data->phase_rpm_resource_id);
if (ret)
goto probe_cx_exit;
key = "qcom,cx-phase-resource-key";
ret = of_property_read_string(node, key,
&tmp_str);
if (ret)
goto probe_cx_exit;
data->cx_phase_request_key = msm_thermal_str_to_int(tmp_str);
key = "qcom,cx-phase-hot-crit-temp";
ret = of_property_read_u32(node, key,
&data->cx_phase_hot_temp_degC);
if (ret)
goto probe_cx_exit;
key = "qcom,cx-phase-hot-crit-temp-hyst";
ret = of_property_read_u32(node, key,
&data->cx_phase_hot_temp_hyst_degC);
if (ret)
goto probe_cx_exit;
ret = init_threshold(MSM_CX_PHASE_CTRL_HOT, MONITOR_ALL_TSENS,
data->cx_phase_hot_temp_degC, data->cx_phase_hot_temp_degC -
data->cx_phase_hot_temp_hyst_degC,
cx_phase_ctrl_notify);
if (ret) {
pr_err("init HOT threshold failed. err:%d\n", ret);
goto probe_cx_exit;
}
cx_phase_ctrl_enabled = true;
probe_cx_exit:
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s err=%d. KTM continues\n",
KBUILD_MODNAME, node->full_name, key, ret);
cx_phase_ctrl_enabled = false;
}
return ret;
}
static int probe_therm_reset(struct device_node *node,
struct msm_thermal_data *data,
struct platform_device *pdev)
{
char *key = NULL;
int ret = 0;
key = "qcom,therm-reset-temp";
ret = of_property_read_u32(node, key, &data->therm_reset_temp_degC);
if (ret)
goto PROBE_RESET_EXIT;
ret = init_threshold(MSM_THERM_RESET, MONITOR_ALL_TSENS,
data->therm_reset_temp_degC, data->therm_reset_temp_degC - 10,
therm_reset_notify);
if (ret) {
pr_err("Therm reset data structure init failed\n");
goto PROBE_RESET_EXIT;
}
therm_reset_enabled = true;
PROBE_RESET_EXIT:
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s err=%d. KTM continues\n",
__func__, node->full_name, key, ret);
therm_reset_enabled = false;
}
return ret;
}
static int probe_freq_mitigation(struct device_node *node,
struct msm_thermal_data *data,
struct platform_device *pdev)
{
char *key = NULL;
int ret = 0;
key = "qcom,freq-mitigation-temp";
ret = of_property_read_u32(node, key, &data->freq_mitig_temp_degc);
if (ret)
goto PROBE_FREQ_EXIT;
key = "qcom,freq-mitigation-temp-hysteresis";
ret = of_property_read_u32(node, key,
&data->freq_mitig_temp_hysteresis_degc);
if (ret)
goto PROBE_FREQ_EXIT;
key = "qcom,freq-mitigation-value";
ret = of_property_read_u32(node, key, &data->freq_limit);
if (ret)
goto PROBE_FREQ_EXIT;
key = "qcom,freq-mitigation-control-mask";
ret = of_property_read_u32(node, key, &data->freq_mitig_control_mask);
if (ret)
goto PROBE_FREQ_EXIT;
freq_mitigation_enabled = 1;
PROBE_FREQ_EXIT:
if (ret) {
dev_info(&pdev->dev,
"%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
__func__, node->full_name, key, ret);
freq_mitigation_enabled = 0;
}
return ret;
}
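/*
 * Platform driver probe: read the common mitigation properties, run the
 * optional feature probes in dependency order, recreate any sysfs nodes
 * requested before probe, then start the thermal monitor.
 */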
static int msm_thermal_dev_probe(struct platform_device *pdev)
{
int ret = 0;
char *key = NULL;
struct device_node *node = pdev->dev.of_node;
struct msm_thermal_data data;
memset(&data, 0, sizeof(struct msm_thermal_data));
data.pdev = pdev;
ret = msm_thermal_pre_init(&pdev->dev);
if (ret) {
pr_err("thermal pre init failed. err:%d\n", ret);
goto fail;
}
key = "qcom,sensor-id";
ret = of_property_read_u32(node, key, &data.sensor_id);
if (ret)
goto fail;
key = "qcom,temp-hysteresis";
ret = of_property_read_u32(node, key, &data.temp_hysteresis_degC);
if (ret)
goto fail;
key = "qcom,freq-step";
ret = of_property_read_u32(node, key, &data.bootup_freq_step);
if (ret)
goto fail;
key = "qcom,online-hotplug-core";
if (of_property_read_bool(node, key))
online_core = true;
else
online_core = false;
key = "qcom,freq-control-mask";
ret = of_property_read_u32(node, key, &data.bootup_freq_control_mask);
ret = probe_cc(node, &data, pdev);
ret = probe_freq_mitigation(node, &data, pdev);
ret = probe_cx_phase_ctrl(node, &data, pdev);
ret = probe_gfx_phase_ctrl(node, &data, pdev);
ret = probe_therm_reset(node, &data, pdev);
ret = probe_vdd_mx(node, &data, pdev);
if (ret == -EPROBE_DEFER)
goto fail;
/*
	 * Probe optional properties below. Call probe_psm before
	 * probe_vdd_rstr because rpm_regulator_get has to be called
	 * before devm_regulator_get.
	 * probe_ocr should be called after probe_vdd_rstr to reuse the
	 * regulator handle. Calling devm_regulator_get more than once
	 * will fail.
*/
ret = probe_psm(node, &data, pdev);
if (ret == -EPROBE_DEFER)
goto fail;
ret = probe_vdd_rstr(node, &data, pdev);
if (ret == -EPROBE_DEFER)
goto fail;
probe_sensor_info(node, &data, pdev);
ret = probe_ocr(node, &data, pdev);
update_cpu_topology(&pdev->dev);
/*
* In case sysfs add nodes get called before probe function.
* Need to make sure sysfs node is created again
*/
if (psm_nodes_called) {
msm_thermal_add_psm_nodes();
psm_nodes_called = false;
}
if (vdd_rstr_nodes_called) {
msm_thermal_add_vdd_rstr_nodes();
vdd_rstr_nodes_called = false;
}
if (sensor_info_nodes_called) {
msm_thermal_add_sensor_info_nodes();
sensor_info_nodes_called = false;
}
if (ocr_nodes_called) {
msm_thermal_add_ocr_nodes();
ocr_nodes_called = false;
}
if (cluster_info_nodes_called) {
create_cpu_topology_sysfs();
cluster_info_nodes_called = false;
}
msm_thermal_ioctl_init();
ret = msm_thermal_init(&data);
msm_thermal_probed = true;
if (interrupt_mode_enable) {
interrupt_mode_init();
interrupt_mode_enable = false;
}
return ret;
fail:
if (ret)
pr_err("Failed reading node=%s, key=%s. err:%d\n",
node->full_name, key, ret);
return ret;
}
static int msm_thermal_dev_exit(struct platform_device *inp_dev)
{
int i = 0;
msm_thermal_ioctl_cleanup();
if (thresh) {
if (vdd_rstr_enabled)
kfree(thresh[MSM_VDD_RESTRICTION].thresh_list);
if (cx_phase_ctrl_enabled)
kfree(thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list);
if (gfx_warm_phase_ctrl_enabled)
kfree(thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list);
if (gfx_crit_phase_ctrl_enabled)
kfree(thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list);
if (ocr_enabled) {
for (i = 0; i < ocr_rail_cnt; i++)
kfree(ocr_rails[i].attr_gp.attrs);
kfree(ocr_rails);
ocr_rails = NULL;
kfree(thresh[MSM_OCR].thresh_list);
}
if (vdd_mx_enabled) {
kfree(mx_kobj);
kfree(mx_attr_group.attrs);
kfree(thresh[MSM_VDD_MX_RESTRICTION].thresh_list);
}
kfree(thresh);
thresh = NULL;
}
return 0;
}
static struct of_device_id msm_thermal_match_table[] = {
{.compatible = "qcom,msm-thermal"},
{},
};
static struct platform_driver msm_thermal_device_driver = {
.probe = msm_thermal_dev_probe,
.driver = {
.name = "msm-thermal",
.owner = THIS_MODULE,
.of_match_table = msm_thermal_match_table,
},
.remove = msm_thermal_dev_exit,
};
int __init msm_thermal_device_init(void)
{
return platform_driver_register(&msm_thermal_device_driver);
}
arch_initcall(msm_thermal_device_init);
int __init msm_thermal_late_init(void)
{
if (num_possible_cpus() > 1)
msm_thermal_add_cc_nodes();
msm_thermal_add_psm_nodes();
msm_thermal_add_vdd_rstr_nodes();
msm_thermal_add_sensor_info_nodes();
if (ocr_reg_init_defer) {
if (!ocr_reg_init(msm_thermal_info.pdev)) {
ocr_enabled = true;
msm_thermal_add_ocr_nodes();
}
}
msm_thermal_add_mx_nodes();
interrupt_mode_init();
create_cpu_topology_sysfs();
return 0;
}
late_initcall(msm_thermal_late_init);
| C-Aniruddh/kernel_vortex | drivers/thermal/msm_thermal.c | C | gpl-2.0 | 134,237 |
/* $Id$
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
        Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
oroborus - (c) 2001 Ken Lynch
xfwm4 - (c) 2002-2020 Olivier Fourdan
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include <X11/X.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xatom.h>
#include <X11/extensions/shape.h>
#include <glib.h>
#include <gdk/gdk.h>
#include <gdk/gdkx.h>
#include <gtk/gtk.h>
#ifdef HAVE_RANDR
#include <X11/extensions/Xrandr.h>
#endif
#include <libxfce4util/libxfce4util.h>
#include <libxfce4ui/libxfce4ui.h>
#include <common/xfwm-common.h>
#include "misc.h"
#include "workspaces.h"
#include "settings.h"
#include "mywindow.h"
#include "frame.h"
#include "client.h"
#include "moveresize.h"
#include "cycle.h"
#include "placement.h"
#include "stacking.h"
#include "transients.h"
#include "focus.h"
#include "netwm.h"
#include "menu.h"
#include "hints.h"
#include "startup_notification.h"
#include "compositor.h"
#include "events.h"
#include "event_filter.h"
#include "xsync.h"
#include "display.h"
#ifndef CHECK_BUTTON_TIME
#define CHECK_BUTTON_TIME 0
#endif
#define WIN_IS_BUTTON(win) ((win == MYWINDOW_XWINDOW(c->buttons[HIDE_BUTTON])) || \
(win == MYWINDOW_XWINDOW(c->buttons[CLOSE_BUTTON])) || \
(win == MYWINDOW_XWINDOW(c->buttons[MAXIMIZE_BUTTON])) || \
(win == MYWINDOW_XWINDOW(c->buttons[SHADE_BUTTON])) || \
(win == MYWINDOW_XWINDOW(c->buttons[STICK_BUTTON])))
#define DOUBLE_CLICK_GRAB (ButtonMotionMask | \
PointerMotionMask | \
ButtonPressMask | \
ButtonReleaseMask)
static xfwmWindow menu_event_window;
/* Forward decl. */
static eventFilterStatus handleEvent (DisplayInfo *display_info,
XfwmEvent *event);
static void menu_callback (Menu * menu,
MenuOp op,
Window xid,
gpointer menu_data,
gpointer item_data);
static void show_window_menu (Client *c,
gint px,
gint py,
guint button,
guint32 time,
gboolean needscale);
static gboolean show_popup_cb (GtkWidget * widget,
GdkEventButton * ev,
gpointer data);
static gboolean set_reload (DisplayInfo *display_info);
typedef enum
{
XFWM_BUTTON_UNDEFINED = 0,
XFWM_BUTTON_DRAG = 1,
XFWM_BUTTON_CLICK = 2,
XFWM_BUTTON_CLICK_AND_DRAG = 3,
XFWM_BUTTON_DOUBLE_CLICK = 4
}
XfwmButtonClickType;
typedef struct _XfwmButtonClickData XfwmButtonClickData;
struct _XfwmButtonClickData
{
DisplayInfo *display_info;
Window w;
guint button;
guint clicks;
guint timeout;
gint x0;
gint y0;
guint t0;
gint xcurrent;
gint ycurrent;
guint tcurrent;
gint double_click_time;
gint double_click_distance;
gboolean allow_double_click;
};
static gboolean
typeOfClick_end (gpointer data)
{
XfwmButtonClickData *passdata;
passdata = data;
if (passdata->timeout)
{
g_source_remove (passdata->timeout);
passdata->timeout = 0;
}
gtk_main_quit ();
return FALSE;
}
static eventFilterStatus
typeOfClick_event_filter (XfwmEvent *event, gpointer data)
{
XfwmButtonClickData *passdata;
eventFilterStatus status;
guint32 timestamp;
gboolean keep_going;
keep_going = TRUE;
passdata = data;
status = EVENT_FILTER_CONTINUE;
/* Update the display time */
timestamp = myDisplayUpdateCurrentTime (passdata->display_info, event);
if (timestamp)
{
passdata->tcurrent = timestamp;
if (((gint) passdata->tcurrent - (gint) passdata->t0) > passdata->double_click_time)
{
keep_going = FALSE;
}
}
if (event->meta.type == XFWM_EVENT_BUTTON)
{
if (event->button.button == passdata->button)
{
passdata->clicks++;
}
if (((XfwmButtonClickType) passdata->clicks == XFWM_BUTTON_DOUBLE_CLICK)
|| (!(passdata->allow_double_click) &&
(XfwmButtonClickType) passdata->clicks == XFWM_BUTTON_CLICK))
{
keep_going = FALSE;
}
status = EVENT_FILTER_STOP;
}
else if (event->meta.type == XFWM_EVENT_MOTION)
{
passdata->xcurrent = event->motion.x_root;
passdata->ycurrent = event->motion.y_root;
if ((ABS (passdata->x0 - passdata->xcurrent) > passdata->double_click_distance) ||
(ABS (passdata->y0 - passdata->ycurrent) > passdata->double_click_distance))
{
keep_going = FALSE;
}
status = EVENT_FILTER_STOP;
}
else if ((event->meta.xevent->type == DestroyNotify) || (event->meta.xevent->type == UnmapNotify))
{
if (event->meta.window == passdata->w)
{
/* Discard, mark the click as undefined */
passdata->clicks = (guint) XFWM_BUTTON_UNDEFINED;
keep_going = FALSE;
}
}
if (!keep_going)
{
TRACE ("click type=%u", passdata->clicks);
TRACE ("time=%ims (timeout=%ims)", (gint) passdata->tcurrent - (gint) passdata->t0, passdata->double_click_time);
TRACE ("dist x=%i (max=%i)", ABS (passdata->x0 - passdata->xcurrent), passdata->double_click_distance);
TRACE ("dist y=%i (max=%i)", ABS (passdata->y0 - passdata->ycurrent), passdata->double_click_distance);
typeOfClick_end (data);
}
return status;
}
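/*
 * Grab the pointer and run a nested main loop to classify the button
 * event as a single click, double click, drag or click-and-drag, based
 * on the configured double-click time and distance.
 */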
static XfwmButtonClickType
typeOfClick (ScreenInfo *screen_info, Window w, XfwmEventButton *event, gboolean allow_double_click)
{
DisplayInfo *display_info;
XfwmButtonClickData passdata;
gboolean g;
g_return_val_if_fail (screen_info != NULL, XFWM_BUTTON_UNDEFINED);
g_return_val_if_fail (event != NULL, XFWM_BUTTON_UNDEFINED);
g_return_val_if_fail (w != None, XFWM_BUTTON_UNDEFINED);
display_info = screen_info->display_info;
g = myScreenGrabPointer (screen_info, FALSE, DOUBLE_CLICK_GRAB, None, event->time);
if (!g)
{
TRACE ("grab failed");
gdk_display_beep (display_info->gdisplay);
myScreenUngrabPointer (screen_info, event->time);
return XFWM_BUTTON_UNDEFINED;
}
passdata.display_info = display_info;
passdata.button = event->button;
passdata.w = w;
passdata.x0 = event->x_root;
passdata.y0 = event->y_root;
passdata.t0 = event->time;
passdata.xcurrent = passdata.x0;
passdata.ycurrent = passdata.y0;
passdata.tcurrent = passdata.t0;
passdata.clicks = 1;
passdata.allow_double_click = allow_double_click;
passdata.double_click_time = display_info->double_click_time;
passdata.double_click_distance = display_info->double_click_distance;
TRACE ("double click time= %i, distance=%i\n", display_info->double_click_time,
display_info->double_click_distance);
passdata.timeout = g_timeout_add_full (G_PRIORITY_HIGH,
display_info->double_click_time,
typeOfClick_end,
&passdata, NULL);
TRACE ("entering loop");
eventFilterPush (display_info->xfilter, typeOfClick_event_filter, &passdata);
gtk_main ();
eventFilterPop (display_info->xfilter);
TRACE ("leaving loop");
myScreenUngrabPointer (screen_info, myDisplayGetCurrentTime (display_info));
return (XfwmButtonClickType) passdata.clicks;
}
static void
toggle_show_desktop (ScreenInfo *screen_info)
{
screen_info->show_desktop = !screen_info->show_desktop;
setHint (screen_info->display_info, screen_info->xroot, NET_SHOWING_DESKTOP,
screen_info->show_desktop);
sendRootMessage (screen_info, NET_SHOWING_DESKTOP, screen_info->show_desktop,
myDisplayGetCurrentTime (screen_info->display_info));
}
static eventFilterStatus
handleMotionNotify (DisplayInfo *display_info, XfwmEventMotion *event)
{
TRACE ("entering");
return EVENT_FILTER_REMOVE;
}
static eventFilterStatus
handleKeyPress (DisplayInfo *display_info, XfwmEventKey *event)
{
eventFilterStatus status;
ScreenInfo *screen_info;
ScreenInfo *ev_screen_info;
Client *c;
int key;
TRACE ("entering");
ev_screen_info = myDisplayGetScreenFromRoot (display_info, event->root);
if (!ev_screen_info)
{
return EVENT_FILTER_PASS;
}
status = EVENT_FILTER_PASS;
c = clientGetFocus ();
if (c)
{
screen_info = c->screen_info;
key = myScreenGetKeyPressed (screen_info, event);
status = EVENT_FILTER_REMOVE;
switch (key)
{
case KEY_MOVE:
clientMove (c, NULL);
break;
case KEY_RESIZE:
clientResize (c, CORNER_BOTTOM_RIGHT, NULL);
break;
case KEY_CYCLE_WINDOWS:
case KEY_CYCLE_REVERSE_WINDOWS:
clientCycle (c, event);
break;
case KEY_CLOSE_WINDOW:
clientClose (c);
break;
case KEY_HIDE_WINDOW:
if (CLIENT_CAN_HIDE_WINDOW (c))
{
clientWithdraw (c, c->win_workspace, TRUE);
}
break;
case KEY_MAXIMIZE_WINDOW:
clientToggleMaximized (c, CLIENT_FLAG_MAXIMIZED, TRUE);
break;
case KEY_MAXIMIZE_VERT:
clientToggleMaximized (c, CLIENT_FLAG_MAXIMIZED_VERT, TRUE);
break;
case KEY_MAXIMIZE_HORIZ:
clientToggleMaximized (c, CLIENT_FLAG_MAXIMIZED_HORIZ, TRUE);
break;
case KEY_SHADE_WINDOW:
clientToggleShaded (c);
break;
case KEY_STICK_WINDOW:
if (FLAG_TEST(c->xfwm_flags, XFWM_FLAG_HAS_STICK))
{
clientToggleSticky (c, TRUE);
frameQueueDraw (c, FALSE);
}
break;
case KEY_RAISE_WINDOW:
clientRaise (c, None);
break;
case KEY_LOWER_WINDOW:
clientLower (c, None);
break;
case KEY_RAISELOWER_WINDOW:
if (clientIsTopMost (c))
{
clientLower (c, None);
}
else
{
clientRaise (c, None);
}
break;
case KEY_TOGGLE_ABOVE:
clientToggleLayerAbove (c);
break;
case KEY_TOGGLE_FULLSCREEN:
clientToggleFullscreen (c);
break;
case KEY_MOVE_NEXT_WORKSPACE:
workspaceSwitch (screen_info, screen_info->current_ws + 1, c, TRUE, event->time);
break;
case KEY_MOVE_PREV_WORKSPACE:
workspaceSwitch (screen_info, screen_info->current_ws - 1, c, TRUE, event->time);
break;
case KEY_MOVE_UP_WORKSPACE:
workspaceMove (screen_info, -1, 0, c, event->time);
break;
case KEY_MOVE_DOWN_WORKSPACE:
workspaceMove (screen_info, 1, 0, c, event->time);
break;
case KEY_MOVE_LEFT_WORKSPACE:
workspaceMove (screen_info, 0, -1, c, event->time);
break;
case KEY_MOVE_RIGHT_WORKSPACE:
workspaceMove (screen_info, 0, 1, c, event->time);
break;
case KEY_MOVE_WORKSPACE_1:
case KEY_MOVE_WORKSPACE_2:
case KEY_MOVE_WORKSPACE_3:
case KEY_MOVE_WORKSPACE_4:
case KEY_MOVE_WORKSPACE_5:
case KEY_MOVE_WORKSPACE_6:
case KEY_MOVE_WORKSPACE_7:
case KEY_MOVE_WORKSPACE_8:
case KEY_MOVE_WORKSPACE_9:
case KEY_MOVE_WORKSPACE_10:
case KEY_MOVE_WORKSPACE_11:
case KEY_MOVE_WORKSPACE_12:
if ((guint) (key - KEY_MOVE_WORKSPACE_1) < screen_info->workspace_count)
{
clientRaise (c, None);
clientSetWorkspace (c, key - KEY_MOVE_WORKSPACE_1, TRUE);
}
break;
case KEY_POPUP_MENU:
show_window_menu (c, frameExtentX (c) + frameLeft (c),
frameExtentY (c) + frameTop (c),
Button1, event->time, TRUE);
break;
case KEY_FILL_WINDOW:
clientFill (c, CLIENT_FILL);
break;
case KEY_FILL_VERT:
clientFill (c, CLIENT_FILL_VERT);
break;
case KEY_FILL_HORIZ:
clientFill (c, CLIENT_FILL_HORIZ);
break;
case KEY_TILE_DOWN:
clientToggleTile (c, TILE_DOWN);
break;
case KEY_TILE_LEFT:
clientToggleTile (c, TILE_LEFT);
break;
case KEY_TILE_RIGHT:
clientToggleTile (c, TILE_RIGHT);
break;
case KEY_TILE_UP:
clientToggleTile (c, TILE_UP);
break;
case KEY_TILE_DOWN_LEFT:
clientToggleTile (c, TILE_DOWN_LEFT);
break;
case KEY_TILE_DOWN_RIGHT:
clientToggleTile (c, TILE_DOWN_RIGHT);
break;
case KEY_TILE_UP_LEFT:
clientToggleTile (c, TILE_UP_LEFT);
break;
case KEY_TILE_UP_RIGHT:
clientToggleTile (c, TILE_UP_RIGHT);
break;
default:
break;
}
}
else
{
key = myScreenGetKeyPressed (ev_screen_info, event);
switch (key)
{
case KEY_CYCLE_WINDOWS:
status = EVENT_FILTER_REMOVE;
if (ev_screen_info->clients)
{
clientCycle (ev_screen_info->clients->prev, event);
}
break;
case KEY_CLOSE_WINDOW:
status = EVENT_FILTER_REMOVE;
if (display_info->session)
{
xfce_sm_client_request_shutdown(display_info->session, XFCE_SM_CLIENT_SHUTDOWN_HINT_LOGOUT);
}
break;
default:
break;
}
}
switch (key)
{
case KEY_SWITCH_WINDOW:
clientSwitchWindow ();
break;
case KEY_SWITCH_APPLICATION:
clientSwitchApp ();
break;
case KEY_NEXT_WORKSPACE:
status = EVENT_FILTER_REMOVE;
workspaceSwitch (ev_screen_info, ev_screen_info->current_ws + 1, NULL, TRUE, event->time);
break;
case KEY_PREV_WORKSPACE:
status = EVENT_FILTER_REMOVE;
workspaceSwitch (ev_screen_info, ev_screen_info->current_ws - 1, NULL, TRUE, event->time);
break;
case KEY_UP_WORKSPACE:
status = EVENT_FILTER_REMOVE;
workspaceMove(ev_screen_info, -1, 0, NULL, event->time);
break;
case KEY_DOWN_WORKSPACE:
status = EVENT_FILTER_REMOVE;
workspaceMove(ev_screen_info, 1, 0, NULL, event->time);
break;
case KEY_LEFT_WORKSPACE:
status = EVENT_FILTER_REMOVE;
workspaceMove(ev_screen_info, 0, -1, NULL, event->time);
break;
case KEY_RIGHT_WORKSPACE:
status = EVENT_FILTER_REMOVE;
workspaceMove(ev_screen_info, 0, 1, NULL, event->time);
break;
case KEY_ADD_WORKSPACE:
status = EVENT_FILTER_REMOVE;
workspaceSetCount (ev_screen_info, ev_screen_info->workspace_count + 1);
break;
case KEY_DEL_WORKSPACE:
status = EVENT_FILTER_REMOVE;
workspaceSetCount (ev_screen_info, ev_screen_info->workspace_count - 1);
break;
case KEY_ADD_ADJACENT_WORKSPACE:
workspaceInsert (ev_screen_info, ev_screen_info->current_ws + 1);
break;
case KEY_DEL_ACTIVE_WORKSPACE:
workspaceDelete (ev_screen_info, ev_screen_info->current_ws);
break;
case KEY_WORKSPACE_1:
case KEY_WORKSPACE_2:
case KEY_WORKSPACE_3:
case KEY_WORKSPACE_4:
case KEY_WORKSPACE_5:
case KEY_WORKSPACE_6:
case KEY_WORKSPACE_7:
case KEY_WORKSPACE_8:
case KEY_WORKSPACE_9:
case KEY_WORKSPACE_10:
case KEY_WORKSPACE_11:
case KEY_WORKSPACE_12:
status = EVENT_FILTER_REMOVE;
if ((guint) (key - KEY_WORKSPACE_1) < ev_screen_info->workspace_count)
{
workspaceSwitch (ev_screen_info, key - KEY_WORKSPACE_1, NULL, TRUE, event->time);
}
break;
case KEY_SHOW_DESKTOP:
status = EVENT_FILTER_REMOVE;
toggle_show_desktop (ev_screen_info);
break;
default:
break;
}
/* Release pending events */
XAllowEvents (display_info->dpy, SyncKeyboard, CurrentTime);
return status;
}
static eventFilterStatus
handleKeyRelease (DisplayInfo *display_info, XfwmEventKey *event)
{
TRACE ("entering");
/* Release pending events */
XAllowEvents (display_info->dpy, SyncKeyboard, CurrentTime);
return EVENT_FILTER_PASS;
}
/* User has clicked on an edge or corner.
* Button 1 : Raise and resize
* Button 2 : Move
* Button 3 : Resize
*/
static void
edgeButton (Client *c, int part, XfwmEventButton *event)
{
ScreenInfo *screen_info;
XfwmButtonClickType tclick;
guint state;
screen_info = c->screen_info;
state = event->state & MODIFIER_MASK;
if (event->button == Button2)
{
tclick = typeOfClick (screen_info, c->window, event, FALSE);
if (tclick == XFWM_BUTTON_CLICK)
{
clientLower (c, None);
}
else if (tclick == XFWM_BUTTON_DRAG)
{
clientMove (c, event);
}
}
else if ((event->button == Button1) || (event->button == Button3))
{
if ((event->button == Button1) ||
((screen_info->params->easy_click) && (state == screen_info->params->easy_click)))
{
if (!(c->type & WINDOW_TYPE_DONT_FOCUS))
{
clientSetFocus (screen_info, c, event->time, NO_FOCUS_FLAG);
}
clientRaise (c, None);
}
tclick = typeOfClick (screen_info, c->window, event, TRUE);
if (tclick == XFWM_BUTTON_DOUBLE_CLICK)
{
switch (part)
{
case CORNER_COUNT + SIDE_LEFT:
case CORNER_COUNT + SIDE_RIGHT:
clientFill(c, CLIENT_FILL_HORIZ);
break;
case CORNER_COUNT + SIDE_TOP:
case CORNER_COUNT + SIDE_BOTTOM:
clientFill(c, CLIENT_FILL_VERT);
break;
default:
clientFill(c, CLIENT_FILL);
break;
}
}
else if (tclick == XFWM_BUTTON_DRAG)
{
clientResize (c, part, event);
}
}
}
static int
edgeGetPart (Client *c, XfwmEventButton *event)
{
int part, x_corner_pixels, y_corner_pixels, x_distance, y_distance;
/* Corner is 1/3 of the side */
x_corner_pixels = MAX(c->width / 3, 50);
y_corner_pixels = MAX(c->height / 3, 50);
/* Distance from event to edge of client window */
x_distance = c->width / 2 - abs(c->width / 2 - event->x);
y_distance = c->height / 2 - abs(c->height / 2 - event->y);
if (x_distance < x_corner_pixels && y_distance < y_corner_pixels)
{
/* In a corner */
if (event->x < c->width / 2)
{
if (event->y < c->height / 2)
{
part = CORNER_TOP_LEFT;
}
else
{
part = CORNER_BOTTOM_LEFT;
}
}
else
{
if (event->y < c->height / 2)
{
part = CORNER_TOP_RIGHT;
}
else
{
part = CORNER_BOTTOM_RIGHT;
}
}
}
else
{
/* Not a corner - some side */
if (x_distance / x_corner_pixels < y_distance / y_corner_pixels)
{
/* Left or right side */
if (event->x < c->width / 2)
{
part = CORNER_COUNT + SIDE_LEFT;
}
else
{
part = CORNER_COUNT + SIDE_RIGHT;
}
}
else
{
/* Top or bottom side */
if (event->y < c->height / 2)
{
part = CORNER_COUNT + SIDE_TOP;
}
else
{
part = CORNER_COUNT + SIDE_BOTTOM;
}
}
}
return part;
}
static void
button1Action (Client *c, XfwmEventButton *event)
{
ScreenInfo *screen_info;
XfwmButtonClickType tclick;
g_return_if_fail (c != NULL);
g_return_if_fail (event != NULL);
screen_info = c->screen_info;
if (!(c->type & WINDOW_TYPE_DONT_FOCUS))
{
clientSetFocus (screen_info, c, event->time, NO_FOCUS_FLAG);
}
clientRaise (c, None);
tclick = typeOfClick (screen_info, c->window, event, TRUE);
if ((tclick == XFWM_BUTTON_DRAG) || (tclick == XFWM_BUTTON_CLICK_AND_DRAG))
{
clientMove (c, event);
}
else if (tclick == XFWM_BUTTON_DOUBLE_CLICK)
{
switch (screen_info->params->double_click_action)
{
case DOUBLE_CLICK_ACTION_MAXIMIZE:
clientToggleMaximized (c, CLIENT_FLAG_MAXIMIZED, TRUE);
break;
case DOUBLE_CLICK_ACTION_SHADE:
clientToggleShaded (c);
break;
case DOUBLE_CLICK_ACTION_FILL:
clientFill(c, CLIENT_FILL);
break;
case DOUBLE_CLICK_ACTION_HIDE:
if (CLIENT_CAN_HIDE_WINDOW (c))
{
clientWithdraw (c, c->win_workspace, TRUE);
}
break;
case DOUBLE_CLICK_ACTION_ABOVE:
clientToggleLayerAbove (c);
break;
default:
break;
}
}
}
static void
titleButton (Client *c, guint state, XfwmEventButton *event)
{
ScreenInfo *screen_info;
g_return_if_fail (c != NULL);
g_return_if_fail (event != NULL);
/* Get Screen data from the client itself */
screen_info = c->screen_info;
if (event->button == Button1)
{
button1Action (c, event);
}
else if (event->button == Button2)
{
clientLower (c, None);
}
else if (event->button == Button3)
{
XfwmButtonClickType tclick;
tclick = typeOfClick (screen_info, c->window, event, FALSE);
if (tclick == XFWM_BUTTON_DRAG)
{
clientMove (c, event);
}
else if (tclick != XFWM_BUTTON_UNDEFINED)
{
if (!(c->type & WINDOW_TYPE_DONT_FOCUS))
{
clientSetFocus (screen_info, c, event->time, NO_FOCUS_FLAG);
}
if (screen_info->params->raise_on_click)
{
clientRaise (c, None);
}
xfwm_device_button_update_window (event, event->root);
if (screen_info->button_handler_id)
{
g_signal_handler_disconnect (G_OBJECT (myScreenGetGtkWidget (screen_info)), screen_info->button_handler_id);
}
screen_info->button_handler_id = g_signal_connect (G_OBJECT (myScreenGetGtkWidget (screen_info)),
"button_press_event", G_CALLBACK (show_popup_cb), (gpointer) c);
/* Let GTK handle this for us. */
}
}
else if (event->button == Button4)
{
/* Mouse wheel scroll up */
#ifdef HAVE_COMPOSITOR
if ((state) && (state == screen_info->params->easy_click) && compositorIsActive (screen_info))
{
clientIncOpacity(c);
}
else
#endif /* HAVE_COMPOSITOR */
if (!FLAG_TEST (c->flags, CLIENT_FLAG_SHADED))
{
if (screen_info->params->mousewheel_rollup)
{
clientShade (c);
}
}
}
else if (event->button == Button5)
{
/* Mouse wheel scroll down */
#ifdef HAVE_COMPOSITOR
if ((state) && (state == screen_info->params->easy_click) && compositorIsActive (screen_info))
{
clientDecOpacity(c);
}
else
#endif /* HAVE_COMPOSITOR */
if (FLAG_TEST (c->flags, CLIENT_FLAG_SHADED))
{
if (screen_info->params->mousewheel_rollup)
{
clientUnshade (c);
}
}
}
#ifdef HAVE_COMPOSITOR
else if (screen_info->params->horiz_scroll_opacity)
{
if (event->button == Button6)
{
/* Mouse wheel scroll left, or left side button */
clientDecOpacity(c);
}
else if (event->button == Button7)
{
/* Mouse wheel scroll right, or right side button */
clientIncOpacity(c);
}
}
#endif /* HAVE_COMPOSITOR */
}
static void
rootScrollButton (DisplayInfo *display_info, XfwmEventButton *event)
{
static guint32 lastscroll = CurrentTime;
ScreenInfo *screen_info;
if ((event->time - lastscroll) < 25) /* ms */
{
/* Too many events in too little time, drop this event... */
return;
}
lastscroll = event->time;
/* Get the screen structure from the root of the event */
screen_info = myDisplayGetScreenFromRoot (display_info, event->root);
if (!screen_info)
{
return;
}
if (event->button == Button4)
{
workspaceSwitch (screen_info, screen_info->current_ws - 1, NULL, TRUE, event->time);
}
else if (event->button == Button5)
{
workspaceSwitch (screen_info, screen_info->current_ws + 1, NULL, TRUE, event->time);
}
}
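/*
 * Dispatch a button press on a managed client: easy-click (modifier)
 * actions, title bar and frame buttons, edges and corners for move or
 * resize; otherwise fall back to root window handling (workspace
 * scrolling or forwarding the event to the xfwm4 window).
 */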
static eventFilterStatus
handleButtonPress (DisplayInfo *display_info, XfwmEventButton *event)
{
ScreenInfo *screen_info;
Client *c;
Window win;
guint state, part;
gboolean replay;
TRACE ("entering");
replay = FALSE;
c = myDisplayGetClientFromWindow (display_info, event->meta.window,
SEARCH_FRAME | SEARCH_WINDOW);
if (c)
{
state = event->state & MODIFIER_MASK;
win = event->subwindow;
screen_info = c->screen_info;
if ((event->button == Button1) && (state) && (state == screen_info->params->easy_click))
{
button1Action (c, event);
}
else if ((event->button == Button2) && (state) && (state == screen_info->params->easy_click))
{
clientLower (c, None);
}
else if ((event->button == Button3) && (state) && (state == screen_info->params->easy_click))
{
part = edgeGetPart (c, event);
edgeButton (c, part, event);
}
#ifdef HAVE_COMPOSITOR
else if ((event->button == Button4) && (screen_info->params->zoom_desktop) && (state) &&
(state == screen_info->params->easy_click) && compositorIsActive (screen_info))
{
compositorZoomIn(screen_info, event);
}
else if ((event->button == Button5) && (screen_info->params->zoom_desktop) && (state) &&
(state == screen_info->params->easy_click) && compositorIsActive (screen_info))
{
compositorZoomOut(screen_info, event);
}
#endif /* HAVE_COMPOSITOR */
else if ((event->button == Button8) && (state) && (state == screen_info->params->easy_click))
{
workspaceSwitch (screen_info, screen_info->current_ws - 1, NULL, TRUE, event->time);
}
else if ((event->button == Button9) && (state) && (state == screen_info->params->easy_click))
{
workspaceSwitch (screen_info, screen_info->current_ws + 1, NULL, TRUE, event->time);
}
else if (WIN_IS_BUTTON (win))
{
if (event->button <= Button3)
{
if (!(c->type & WINDOW_TYPE_DONT_FOCUS))
{
clientSetFocus (screen_info, c, event->time, NO_FOCUS_FLAG);
}
if (screen_info->params->raise_on_click)
{
clientClearDelayedRaise ();
clientRaise (c, None);
}
clientButtonPress (c, win, event);
}
}
else if (win == MYWINDOW_XWINDOW (c->title))
{
titleButton (c, state, event);
}
else if (win == MYWINDOW_XWINDOW (c->buttons[MENU_BUTTON]))
{
if (event->button == Button1)
{
XfwmButtonClickType tclick;
tclick = typeOfClick (screen_info, c->window, event, TRUE);
if (tclick == XFWM_BUTTON_DOUBLE_CLICK)
{
clientClose (c);
}
else if (tclick != XFWM_BUTTON_UNDEFINED)
{
if (!(c->type & WINDOW_TYPE_DONT_FOCUS))
{
clientSetFocus (screen_info, c, event->time, NO_FOCUS_FLAG);
}
if (screen_info->params->raise_on_click)
{
clientClearDelayedRaise ();
clientRaise (c, None);
}
xfwm_device_button_update_window (event, event->root);
if (screen_info->button_handler_id)
{
g_signal_handler_disconnect (G_OBJECT (myScreenGetGtkWidget (screen_info)), screen_info->button_handler_id);
}
screen_info->button_handler_id = g_signal_connect (G_OBJECT (myScreenGetGtkWidget (screen_info)),
"button_press_event", G_CALLBACK (show_popup_cb), (gpointer) c);
/* Let GTK handle this for us. */
}
}
}
else if (win == MYWINDOW_XWINDOW (c->corners[CORNER_TOP_LEFT]))
{
edgeButton (c, CORNER_TOP_LEFT, event);
}
else if (win == MYWINDOW_XWINDOW (c->corners[CORNER_TOP_RIGHT]))
{
edgeButton (c, CORNER_TOP_RIGHT, event);
}
else if (win == MYWINDOW_XWINDOW (c->corners[CORNER_BOTTOM_LEFT]))
{
edgeButton (c, CORNER_BOTTOM_LEFT, event);
}
else if (win == MYWINDOW_XWINDOW (c->corners[CORNER_BOTTOM_RIGHT]))
{
edgeButton (c, CORNER_BOTTOM_RIGHT, event);
}
else if (win == MYWINDOW_XWINDOW (c->sides[SIDE_BOTTOM]))
{
edgeButton (c, CORNER_COUNT + SIDE_BOTTOM, event);
}
else if (win == MYWINDOW_XWINDOW (c->sides[SIDE_TOP]))
{
edgeButton (c, CORNER_COUNT + SIDE_TOP, event);
}
else if (win == MYWINDOW_XWINDOW (c->sides[SIDE_LEFT]))
{
edgeButton (c, CORNER_COUNT + SIDE_LEFT, event);
}
else if (win == MYWINDOW_XWINDOW (c->sides[SIDE_RIGHT]))
{
edgeButton (c, CORNER_COUNT + SIDE_RIGHT, event);
}
else if (event->meta.window == c->window)
{
replay = TRUE;
if (((screen_info->params->raise_with_any_button) && (c->type & WINDOW_REGULAR_FOCUSABLE))
|| (event->button == Button1))
{
if (!(c->type & WINDOW_TYPE_DONT_FOCUS))
{
clientSetFocus (screen_info, c, event->time, NO_FOCUS_FLAG);
}
if ((screen_info->params->raise_on_click) ||
!FLAG_TEST (c->xfwm_flags, XFWM_FLAG_HAS_BORDER))
{
clientClearDelayedRaise ();
clientRaise (c, None);
}
}
}
}
else
{
/*
The event did not occur in one of our known good clients...
Get the screen structure from the root of the event.
*/
screen_info = myDisplayGetScreenFromRoot (display_info, event->root);
if (screen_info)
{
if ((event->meta.window == screen_info->xroot) && (screen_info->params->scroll_workspaces)
&& ((event->button == Button4) || (event->button == Button5)))
{
rootScrollButton (display_info, event);
}
else
{
myDisplayErrorTrapPush (display_info);
xfwm_device_ungrab (display_info->devices, &display_info->devices->pointer,
display_info->dpy, event->time);
XSendEvent (display_info->dpy, screen_info->xfwm4_win, FALSE, SubstructureNotifyMask, event->meta.xevent);
myDisplayErrorTrapPopIgnored (display_info);
}
}
}
/* Release pending events */
XAllowEvents (display_info->dpy, replay ? ReplayPointer : SyncPointer, CurrentTime);
return EVENT_FILTER_REMOVE;
}
static eventFilterStatus
handleButtonRelease (DisplayInfo *display_info, XfwmEventButton *event)
{
ScreenInfo *screen_info;
TRACE ("entering");
/* Get the screen structure from the root of the event */
screen_info = myDisplayGetScreenFromRoot (display_info, event->root);
myDisplayErrorTrapPush (display_info);
if (screen_info)
{
XSendEvent (display_info->dpy, screen_info->xfwm4_win, FALSE, SubstructureNotifyMask,
(XEvent *) event->meta.xevent);
}
/* Release pending events */
XAllowEvents (display_info->dpy, SyncPointer, CurrentTime);
myDisplayErrorTrapPopIgnored (display_info);
return EVENT_FILTER_REMOVE;
}
static eventFilterStatus
handleDestroyNotify (DisplayInfo *display_info, XDestroyWindowEvent * ev)
{
eventFilterStatus status;
GList *list_of_windows;
Client *c;
#ifdef ENABLE_KDE_SYSTRAY_PROXY
ScreenInfo *screen_info;
#endif
TRACE ("window (0x%lx)", ev->window);
status = EVENT_FILTER_PASS;
#ifdef ENABLE_KDE_SYSTRAY_PROXY
screen_info = myDisplayGetScreenFromSystray (display_info, ev->window);
if (screen_info)
{
/* systray window is gone */
screen_info->systray = None;
return EVENT_FILTER_REMOVE;
}
#endif
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW);
if (c)
{
TRACE ("client \"%s\" (0x%lx)", c->name, c->window);
list_of_windows = clientListTransientOrModal (c);
clientPassFocus (c->screen_info, c, list_of_windows);
clientUnframe (c, FALSE);
g_list_free (list_of_windows);
status = EVENT_FILTER_REMOVE;
}
return status;
}
static eventFilterStatus
handleMapRequest (DisplayInfo *display_info, XMapRequestEvent * ev)
{
eventFilterStatus status;
Client *c;
TRACE ("window (0x%lx)", ev->window);
status = EVENT_FILTER_PASS;
if (ev->window == None)
{
TRACE ("mapping None ???");
return status;
}
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW);
if (c)
{
ScreenInfo *screen_info = c->screen_info;
if (FLAG_TEST (c->xfwm_flags, XFWM_FLAG_MAP_PENDING))
{
TRACE ("ignoring MapRequest on window (0x%lx)", ev->window);
}
if (FLAG_TEST (c->xfwm_flags, XFWM_FLAG_WAS_SHOWN))
{
clientClearAllShowDesktop (screen_info);
}
clientShow (c, TRUE);
if (FLAG_TEST (c->flags, CLIENT_FLAG_STICKY) ||
(c->win_workspace == screen_info->current_ws))
{
clientFocusNew(c);
}
}
else
{
clientFrame (display_info, ev->window, FALSE);
}
status = EVENT_FILTER_REMOVE;
return status;
}
static eventFilterStatus
handleMapNotify (DisplayInfo *display_info, XMapEvent * ev)
{
Client *c;
TRACE ("window (0x%lx)", ev->window);
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW);
if (c)
{
TRACE ("client \"%s\" (0x%lx)", c->name, c->window);
if (FLAG_TEST (c->xfwm_flags, XFWM_FLAG_MAP_PENDING))
{
FLAG_UNSET (c->xfwm_flags, XFWM_FLAG_MAP_PENDING);
}
return EVENT_FILTER_REMOVE;
}
return EVENT_FILTER_PASS;
}
static eventFilterStatus
handleUnmapNotify (DisplayInfo *display_info, XUnmapEvent * ev)
{
eventFilterStatus status;
ScreenInfo *screen_info;
GList *list_of_windows;
Client *c;
TRACE ("window (0x%lx)", ev->window);
status = EVENT_FILTER_PASS;
if (ev->from_configure)
{
TRACE ("ignoring UnmapNotify caused by parent's resize");
return status;
}
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW);
if (c)
{
TRACE ("client \"%s\" (0x%lx) ignore_unmap %i", c->name, c->window, c->ignore_unmap);
screen_info = c->screen_info;
if ((ev->event != ev->window) && (ev->event != screen_info->xroot || !ev->send_event))
{
TRACE ("event ignored");
return status;
}
status = EVENT_FILTER_REMOVE;
if (FLAG_TEST (c->xfwm_flags, XFWM_FLAG_MAP_PENDING))
{
/*
* This UnmapNotify event is caused by reparenting
* so we just ignore it, so the window won't return
* to withdrawn state by mistake.
*/
TRACE ("client \"%s\" is not mapped, event ignored", c->name);
return status;
}
/*
* ICCCM spec states that a client wishing to switch
* to WithdrawnState should send a synthetic UnmapNotify
* with the event field set to root if the client window
* is already unmapped.
* Therefore, bypass the ignore_unmap counter and
* unframe the client.
*/
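        /*
         * Illustrative sketch (not part of xfwm4): on the client side this
         * withdrawal is typically performed with Xlib's XWithdrawWindow(),
         * which is roughly equivalent to:
         *
         *     XUnmapEvent xev = { 0 };
         *     XUnmapWindow (dpy, win);
         *     xev.type = UnmapNotify;
         *     xev.event = root;
         *     xev.window = win;
         *     xev.from_configure = False;
         *     XSendEvent (dpy, root, False,
         *                 SubstructureRedirectMask | SubstructureNotifyMask,
         *                 (XEvent *) &xev);
         *
         * hence the (ev->event == screen_info->xroot) && (ev->send_event)
         * check below.
         */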
if ((ev->event == screen_info->xroot) && (ev->send_event))
{
if (!FLAG_TEST (c->xfwm_flags, XFWM_FLAG_VISIBLE))
{
TRACE ("ICCCM UnmapNotify for \"%s\"", c->name);
list_of_windows = clientListTransientOrModal (c);
clientPassFocus (screen_info, c, list_of_windows);
clientUnframe (c, FALSE);
g_list_free (list_of_windows);
}
return status;
}
if (c->ignore_unmap)
{
c->ignore_unmap--;
TRACE ("ignore_unmap for \"%s\" is now %i", c->name, c->ignore_unmap);
}
else
{
TRACE ("unmapping \"%s\" as ignore_unmap is %i", c->name, c->ignore_unmap);
list_of_windows = clientListTransientOrModal (c);
clientPassFocus (screen_info, c, list_of_windows);
clientUnframe (c, FALSE);
g_list_free (list_of_windows);
}
}
return status;
}
static eventFilterStatus
handleConfigureNotify (DisplayInfo *display_info, XConfigureEvent * ev)
{
TRACE ("entering");
return EVENT_FILTER_PASS;
}
static eventFilterStatus
handleConfigureRequest (DisplayInfo *display_info, XConfigureRequestEvent * ev)
{
Client *c;
XWindowChanges wc;
TRACE ("window (0x%lx)", ev->window);
wc.x = ev->x;
wc.y = ev->y;
wc.width = ev->width;
wc.height = ev->height;
wc.sibling = ev->above;
wc.stack_mode = ev->detail;
wc.border_width = ev->border_width;
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW);
if (!c)
{
        /* Some apps try to manipulate the wm frame to achieve fullscreen mode */
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_FRAME);
if (c)
{
TRACE ("client %s (0x%lx) is attempting to manipulate its frame!", c->name, c->window);
if (ev->value_mask & CWX)
{
wc.x += frameLeft (c);
}
if (ev->value_mask & CWY)
{
wc.y += frameTop (c);
}
if (ev->value_mask & CWWidth)
{
wc.width -= frameLeft (c) + frameRight (c);
}
if (ev->value_mask & CWHeight)
{
wc.height -= frameTop (c) + frameBottom (c);
}
/* We don't allow changing stacking order by accessing the frame
window because that would break the layer management in xfwm4
*/
ev->value_mask &= ~(CWSibling | CWStackMode);
}
}
if (c)
{
TRACE ("window \"%s\" (0x%lx)", c->name, c->window);
if (FLAG_TEST (c->xfwm_flags, XFWM_FLAG_MOVING_RESIZING))
{
/* Sorry, but it's not the right time for configure request */
return EVENT_FILTER_REMOVE;
}
clientAdjustCoordGravity (c, c->gravity, &wc, &ev->value_mask);
clientMoveResizeWindow (c, &wc, ev->value_mask);
}
else
{
TRACE ("unmanaged configure request for window 0x%lx", ev->window);
myDisplayErrorTrapPush (display_info);
XConfigureWindow (display_info->dpy, ev->window, ev->value_mask, &wc);
myDisplayErrorTrapPopIgnored (display_info);
}
return EVENT_FILTER_REMOVE;
}
static eventFilterStatus
handleEnterNotify (DisplayInfo *display_info, XfwmEventCrossing *event)
{
ScreenInfo *screen_info;
Client *c;
int b;
gboolean need_redraw;
/* See http://rfc-ref.org/RFC-TEXTS/1013/chapter12.html for details */
TRACE ("entering");
if ((event->mode == NotifyGrab) || (event->mode == NotifyUngrab)
|| (event->detail > NotifyNonlinearVirtual))
{
/* We're not interested in such notifications */
return EVENT_FILTER_PASS;
}
TRACE ("window (0x%lx)", event->meta.window);
need_redraw = FALSE;
c = myDisplayGetClientFromWindow (display_info, event->meta.window,
SEARCH_FRAME | SEARCH_BUTTON);
if (c)
{
screen_info = c->screen_info;
TRACE ("client \"%s\"", c->name);
if (!(screen_info->params->click_to_focus) && clientAcceptFocus (c))
{
if (!(c->type & (WINDOW_DOCK | WINDOW_DESKTOP)))
{
if(screen_info->params->focus_delay)
{
clientClearDelayedFocus ();
clientAddDelayedFocus (c, event->time);
}
else
{
clientSetFocus (c->screen_info, c, event->time, NO_FOCUS_FLAG);
}
}
else
{
clientClearDelayedFocus ();
}
}
if (c == clientGetFocus ())
{
for (b = 0; b < BUTTON_COUNT; b++)
{
if (MYWINDOW_XWINDOW(c->buttons[b]) == event->meta.window)
{
if (!xfwmPixmapNone(clientGetButtonPixmap(c, b, PRELIGHT)))
{
c->button_status[b] = BUTTON_STATE_PRELIGHT;
need_redraw = TRUE;
}
}
}
if (need_redraw)
{
frameQueueDraw (c, FALSE);
}
}
/* No need to process the event any further */
return EVENT_FILTER_REMOVE;
}
/* The event was not for a client window */
if (display_info->nb_screens > 1)
{
        /* Wrap workspaces/wrap windows is disabled with multiscreen */
return EVENT_FILTER_REMOVE;
}
/* Get the screen structure from the root of the event */
screen_info = myDisplayGetScreenFromRoot (display_info, event->root);
if (!screen_info)
{
return EVENT_FILTER_PASS;
}
if (screen_info->params->wrap_workspaces && screen_info->workspace_count > 1)
{
clientMoveWarp (NULL, screen_info, &event->x_root, &event->y_root, event->time);
}
return EVENT_FILTER_REMOVE;
}
static eventFilterStatus
handleLeaveNotify (DisplayInfo *display_info, XfwmEventCrossing *event)
{
Client *c;
int b;
gboolean need_redraw;
TRACE ("entering");
need_redraw = FALSE;
c = myDisplayGetClientFromWindow (display_info, event->meta.window,
SEARCH_FRAME | SEARCH_BUTTON);
if (c)
{
for (b = 0; b < BUTTON_COUNT; b++)
{
if ((c->button_status[b] == BUTTON_STATE_PRELIGHT) || (c->button_status[b] == BUTTON_STATE_PRESSED))
{
if (MYWINDOW_XWINDOW(c->buttons[b]) == event->meta.window)
{
c->button_status[b] = BUTTON_STATE_NORMAL;
need_redraw = TRUE;
}
}
}
if (need_redraw)
{
frameQueueDraw (c, FALSE);
}
/* No need to process the event any further */
return EVENT_FILTER_REMOVE;
}
return EVENT_FILTER_PASS;
}
static eventFilterStatus
handleFocusIn (DisplayInfo *display_info, XFocusChangeEvent * ev)
{
ScreenInfo *screen_info;
Client *c, *user_focus, *current_focus;
/* See http://rfc-ref.org/RFC-TEXTS/1013/chapter12.html for details */
TRACE ("window (0x%lx) mode = %s",
ev->window,
(ev->mode == NotifyNormal) ?
"NotifyNormal" :
(ev->mode == NotifyWhileGrabbed) ?
"NotifyWhileGrabbed" :
(ev->mode == NotifyGrab) ?
"NotifyGrab" :
(ev->mode == NotifyUngrab) ?
"NotifyUngrab" :
"(unknown)");
TRACE ("window (0x%lx) detail = %s",
ev->window,
(ev->detail == NotifyAncestor) ?
"NotifyAncestor" :
(ev->detail == NotifyVirtual) ?
"NotifyVirtual" :
(ev->detail == NotifyInferior) ?
"NotifyInferior" :
(ev->detail == NotifyNonlinear) ?
"NotifyNonlinear" :
(ev->detail == NotifyNonlinearVirtual) ?
"NotifyNonlinearVirtual" :
(ev->detail == NotifyPointer) ?
"NotifyPointer" :
(ev->detail == NotifyPointerRoot) ?
"NotifyPointerRoot" :
(ev->detail == NotifyDetailNone) ?
"NotifyDetailNone" :
"(unknown)");
if ((ev->mode == NotifyGrab) || (ev->mode == NotifyUngrab))
{
/* We're not interested in such notifications */
return EVENT_FILTER_PASS;
}
screen_info = myDisplayGetScreenFromWindow (display_info, ev->window);
if (screen_info &&
((ev->detail == NotifyDetailNone) ||
((ev->mode == NotifyNormal) && (ev->detail == NotifyInferior))))
{
/*
Handle unexpected focus transition to root (means that an unknown
window has vanished and the focus is returned to the root).
*/
c = clientGetFocusOrPending ();
clientSetFocus (screen_info, c, getXServerTime (display_info), FOCUS_FORCE);
return EVENT_FILTER_PASS;
}
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_FRAME | SEARCH_WINDOW);
user_focus = clientGetUserFocus ();
current_focus = clientGetFocus ();
TRACE ("window (0x%lx)", ev->window);
if ((c) && (c != current_focus) && (FLAG_TEST (c->xfwm_flags, XFWM_FLAG_VISIBLE)))
{
TRACE ("focus transfered to \"%s\" (0x%lx)", c->name, c->window);
screen_info = c->screen_info;
clientUpdateFocus (screen_info, c, FOCUS_SORT);
if ((user_focus != c) && (user_focus != NULL))
{
/*
Focus stealing prevention:
Some apps tend to focus the window directly. If focus stealing prevention is enabled,
we revert the user set focus to the window that we think has focus and then set the
demand attention flag.
Note that focus stealing prevention is ignored between windows of the same group or
between windows that have a transient relationship, as some apps tend to play with
focus with their "own" windows.
*/
if (screen_info->params->prevent_focus_stealing &&
!clientSameGroup (c, user_focus) &&
!clientIsTransientOrModalFor (c, user_focus))
{
TRACE ("setting focus back to \"%s\" (0x%lx)", user_focus->name, user_focus->window);
clientSetFocus (user_focus->screen_info, user_focus, getXServerTime (display_info), NO_FOCUS_FLAG);
if (current_focus)
{
TRACE ("setting WM_STATE_DEMANDS_ATTENTION flag on \"%s\" (0x%lx)", c->name, c->window);
FLAG_SET (c->flags, CLIENT_FLAG_DEMANDS_ATTENTION);
clientSetNetState (c);
}
}
}
}
return EVENT_FILTER_REMOVE;
}
static eventFilterStatus
handleFocusOut (DisplayInfo *display_info, XFocusChangeEvent * ev)
{
Client *c;
/* See http://rfc-ref.org/RFC-TEXTS/1013/chapter12.html for details */
TRACE ("window (0x%lx) mode = %s",
ev->window,
(ev->mode == NotifyNormal) ?
"NotifyNormal" :
(ev->mode == NotifyWhileGrabbed) ?
"NotifyWhileGrabbed" :
"(unknown)");
TRACE ("window (0x%lx) detail = %s",
ev->window,
(ev->detail == NotifyAncestor) ?
"NotifyAncestor" :
(ev->detail == NotifyVirtual) ?
"NotifyVirtual" :
(ev->detail == NotifyInferior) ?
"NotifyInferior" :
(ev->detail == NotifyNonlinear) ?
"NotifyNonlinear" :
(ev->detail == NotifyNonlinearVirtual) ?
"NotifyNonlinearVirtual" :
(ev->detail == NotifyPointer) ?
"NotifyPointer" :
(ev->detail == NotifyPointerRoot) ?
"NotifyPointerRoot" :
(ev->detail == NotifyDetailNone) ?
"NotifyDetailNone" :
"(unknown)");
if ((ev->mode == NotifyGrab) || (ev->mode == NotifyUngrab) ||
(ev->detail == NotifyInferior) || (ev->detail > NotifyNonlinearVirtual))
{
/* We're not interested in such notifications */
return EVENT_FILTER_PASS;
}
if ((ev->mode == NotifyNormal)
&& ((ev->detail == NotifyNonlinear)
|| (ev->detail == NotifyNonlinearVirtual)))
{
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_FRAME | SEARCH_WINDOW);
TRACE ("window (0x%lx)", ev->window);
if ((c) && (c == clientGetFocus ()))
{
TRACE ("focus lost from \"%s\" (0x%lx)", c->name, c->window);
clientUpdateFocus (c->screen_info, NULL, NO_FOCUS_FLAG);
clientClearDelayedRaise ();
}
}
return EVENT_FILTER_REMOVE;
}
static eventFilterStatus
handlePropertyNotify (DisplayInfo *display_info, XPropertyEvent * ev)
{
eventFilterStatus status;
ScreenInfo *screen_info;
Client *c;
TRACE ("entering");
status = EVENT_FILTER_PASS;
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW | SEARCH_WIN_USER_TIME);
if (c)
{
status = EVENT_FILTER_REMOVE;
screen_info = c->screen_info;
if (ev->atom == XA_WM_NORMAL_HINTS)
{
TRACE ("client \"%s\" (0x%lx) has received a XA_WM_NORMAL_HINTS notify", c->name, c->window);
clientGetWMNormalHints (c, TRUE);
}
else if ((ev->atom == XA_WM_NAME) ||
(ev->atom == display_info->atoms[NET_WM_NAME]) ||
(ev->atom == display_info->atoms[WM_CLIENT_MACHINE]))
{
TRACE ("client \"%s\" (0x%lx) has received a XA_WM_NAME/NET_WM_NAME/WM_CLIENT_MACHINE notify", c->name, c->window);
clientUpdateName (c);
}
else if (ev->atom == display_info->atoms[MOTIF_WM_HINTS])
{
TRACE ("client \"%s\" (0x%lx) has received a MOTIF_WM_HINTS notify", c->name, c->window);
clientGetMWMHints (c);
clientApplyMWMHints (c, TRUE);
}
else if (ev->atom == XA_WM_HINTS)
{
TRACE ("client \"%s\" (0x%lx) has received a XA_WM_HINTS notify", c->name, c->window);
/* Free previous wmhints if any */
if (c->wmhints)
{
XFree (c->wmhints);
}
myDisplayErrorTrapPush (display_info);
c->wmhints = XGetWMHints (display_info->dpy, c->window);
myDisplayErrorTrapPopIgnored (display_info);
if (c->wmhints)
{
if (c->wmhints->flags & WindowGroupHint)
{
c->group_leader = c->wmhints->window_group;
}
if ((c->wmhints->flags & IconPixmapHint) && (screen_info->params->show_app_icon))
{
clientUpdateIcon (c);
}
if (HINTS_ACCEPT_INPUT (c->wmhints))
{
FLAG_SET (c->wm_flags, WM_FLAG_INPUT);
}
else
{
FLAG_UNSET (c->wm_flags, WM_FLAG_INPUT);
}
}
clientUpdateUrgency (c);
}
else if (ev->atom == display_info->atoms[WM_PROTOCOLS])
{
TRACE ("client \"%s\" (0x%lx) has received a WM_PROTOCOLS notify", c->name, c->window);
clientGetWMProtocols (c);
}
else if (ev->atom == display_info->atoms[WM_TRANSIENT_FOR])
{
Window w;
TRACE ("client \"%s\" (0x%lx) has received a WM_TRANSIENT_FOR notify", c->name, c->window);
c->transient_for = None;
getTransientFor (display_info, c->screen_info->xroot, c->window, &w);
if (clientCheckTransientWindow (c, w))
{
c->transient_for = w;
}
/* Recompute window type as it may have changed */
clientWindowType (c);
}
else if (ev->atom == display_info->atoms[NET_WM_WINDOW_TYPE])
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_WINDOW_TYPE notify", c->name, c->window);
clientGetNetWmType (c);
frameQueueDraw (c, TRUE);
}
else if ((ev->atom == display_info->atoms[NET_WM_STRUT]) ||
(ev->atom == display_info->atoms[NET_WM_STRUT_PARTIAL]))
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_STRUT notify", c->name, c->window);
if (clientGetNetStruts (c) && FLAG_TEST (c->xfwm_flags, XFWM_FLAG_VISIBLE))
{
workspaceUpdateArea (c->screen_info);
}
}
else if (ev->atom == display_info->atoms[WM_COLORMAP_WINDOWS])
{
TRACE ("client \"%s\" (0x%lx) has received a WM_COLORMAP_WINDOWS notify", c->name, c->window);
clientUpdateColormaps (c);
if (c == clientGetFocus ())
{
clientInstallColormaps (c);
}
}
else if (ev->atom == display_info->atoms[NET_WM_USER_TIME])
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_USER_TIME notify", c->name, c->window);
clientGetUserTime (c);
}
else if (ev->atom == display_info->atoms[NET_WM_USER_TIME_WINDOW])
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_USER_TIME_WINDOW notify", c->name, c->window);
clientRemoveUserTimeWin (c);
c->user_time_win = getNetWMUserTimeWindow(display_info, c->window);
clientAddUserTimeWin (c);
}
else if (ev->atom == display_info->atoms[NET_WM_PID])
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_PID notify", c->name, c->window);
if (c->pid == 0)
{
c->pid = getWindowPID (display_info, c->window);
TRACE ("client \"%s\" (0x%lx) updated PID = %i", c->name, c->window, c->pid);
}
}
else if (ev->atom == display_info->atoms[NET_WM_WINDOW_OPACITY])
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_WINDOW_OPACITY notify", c->name, c->window);
if (!getOpacity (display_info, c->window, &c->opacity))
{
c->opacity = NET_WM_OPAQUE;
}
clientSetOpacity (c, c->opacity, 0, 0);
}
else if (ev->atom == display_info->atoms[NET_WM_WINDOW_OPACITY_LOCKED])
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_WINDOW_OPACITY_LOCKED notify", c->name, c->window);
if (getOpacityLock (display_info, c->window))
{
FLAG_SET (c->xfwm_flags, XFWM_FLAG_OPACITY_LOCKED);
}
else
{
FLAG_UNSET (c->xfwm_flags, XFWM_FLAG_OPACITY_LOCKED);
}
}
else if ((screen_info->params->show_app_icon) &&
((ev->atom == display_info->atoms[NET_WM_ICON]) ||
(ev->atom == display_info->atoms[KWM_WIN_ICON])))
{
clientUpdateIcon (c);
}
else if (ev->atom == display_info->atoms[GTK_FRAME_EXTENTS])
{
TRACE ("client \"%s\" (0x%lx) has received a GTK_FRAME_EXTENTS notify", c->name, c->window);
if (clientGetGtkFrameExtents (c))
{
if (FLAG_TEST (c->flags, CLIENT_FLAG_MAXIMIZED))
{
clientUpdateMaximizeSize (c);
}
else if (c->tile_mode != TILE_NONE)
{
clientUpdateTileSize (c);
}
}
}
else if (ev->atom == display_info->atoms[GTK_HIDE_TITLEBAR_WHEN_MAXIMIZED])
{
TRACE ("client \"%s\" (0x%lx) has received a GTK_HIDE_TITLEBAR_WHEN_MAXIMIZED notify", c->name, c->window);
if (clientGetGtkHideTitlebar (c))
{
if (FLAG_TEST (c->flags, CLIENT_FLAG_MAXIMIZED))
{
clientUpdateMaximizeSize (c);
clientReconfigure (c, CFG_FORCE_REDRAW);
}
}
}
#ifdef HAVE_STARTUP_NOTIFICATION
else if (ev->atom == display_info->atoms[NET_STARTUP_ID])
{
if (c->startup_id)
{
g_free (c->startup_id);
c->startup_id = NULL;
}
getWindowStartupId (display_info, c->window, &c->startup_id);
}
#endif /* HAVE_STARTUP_NOTIFICATION */
#ifdef HAVE_XSYNC
else if (ev->atom == display_info->atoms[NET_WM_SYNC_REQUEST_COUNTER])
{
TRACE ("window 0x%lx has received NET_WM_SYNC_REQUEST_COUNTER", c->window);
clientGetXSyncCounter (c);
}
#endif /* HAVE_XSYNC */
return status;
}
screen_info = myDisplayGetScreenFromWindow (display_info, ev->window);
if (!screen_info)
{
return status;
}
if (ev->atom == display_info->atoms[NET_DESKTOP_NAMES])
{
gchar **names;
guint items;
TRACE ("root has received a NET_DESKTOP_NAMES notify");
if (getUTF8StringList (display_info, screen_info->xroot, NET_DESKTOP_NAMES, &names, &items))
{
workspaceSetNames (screen_info, names, items);
}
}
else if (ev->atom == display_info->atoms[NET_DESKTOP_LAYOUT])
{
TRACE ("root has received a NET_DESKTOP_LAYOUT notify");
getDesktopLayout(display_info, screen_info->xroot, screen_info->workspace_count, &screen_info->desktop_layout);
placeSidewalks(screen_info, screen_info->params->wrap_workspaces);
}
return status;
}
static eventFilterStatus
handleClientMessage (DisplayInfo *display_info, XClientMessageEvent * ev)
{
eventFilterStatus status;
ScreenInfo *screen_info;
Client *c;
TRACE ("window (0x%lx)", ev->window);
if (ev->window == None)
{
        /* Some clients do not set the window member; not much we can do without it */
return EVENT_FILTER_PASS;
}
status = EVENT_FILTER_PASS;
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW);
if (c)
{
status = EVENT_FILTER_REMOVE;
if ((ev->message_type == display_info->atoms[WM_CHANGE_STATE]) && (ev->format == 32) && (ev->data.l[0] == IconicState))
{
TRACE ("client \"%s\" (0x%lx) has received a WM_CHANGE_STATE event", c->name, c->window);
if (!FLAG_TEST (c->flags, CLIENT_FLAG_ICONIFIED))
{
clientWithdraw (c, c->win_workspace, TRUE);
}
}
else if ((ev->message_type == display_info->atoms[NET_WM_DESKTOP]) && (ev->format == 32))
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_DESKTOP event", c->name, c->window);
clientUpdateNetWmDesktop (c, ev);
}
else if ((ev->message_type == display_info->atoms[NET_CLOSE_WINDOW]) && (ev->format == 32))
{
TRACE ("client \"%s\" (0x%lx) has received a NET_CLOSE_WINDOW event", c->name, c->window);
clientClose (c);
}
else if ((ev->message_type == display_info->atoms[NET_WM_STATE]) && (ev->format == 32))
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_STATE event", c->name, c->window);
clientUpdateNetState (c, ev);
}
else if ((ev->message_type == display_info->atoms[NET_WM_MOVERESIZE]) && (ev->format == 32))
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_MOVERESIZE event", c->name, c->window);
clientNetMoveResize (c, ev);
}
else if ((ev->message_type == display_info->atoms[NET_MOVERESIZE_WINDOW]) && (ev->format == 32))
{
TRACE ("client \"%s\" (0x%lx) has received a NET_MOVERESIZE_WINDOW event", c->name, c->window);
clientNetMoveResizeWindow (c, ev);
}
else if ((ev->message_type == display_info->atoms[NET_ACTIVE_WINDOW]) && (ev->format == 32))
{
TRACE ("client \"%s\" (0x%lx) has received a NET_ACTIVE_WINDOW event", c->name, c->window);
clientHandleNetActiveWindow (c, (guint32) ev->data.l[1], (gboolean) (ev->data.l[0] == 1));
}
else if (ev->message_type == display_info->atoms[NET_REQUEST_FRAME_EXTENTS])
{
TRACE ("client \"%s\" (0x%lx) has received a NET_REQUEST_FRAME_EXTENTS event", c->name, c->window);
setNetFrameExtents (display_info, c->window, frameTop (c), frameLeft (c),
frameRight (c), frameBottom (c));
}
else if (ev->message_type == display_info->atoms[NET_WM_FULLSCREEN_MONITORS])
{
TRACE ("client \"%s\" (0x%lx) has received a NET_WM_FULLSCREEN_MONITORS event", c->name, c->window);
clientSetFullscreenMonitor (c, (gint) ev->data.l[0], (gint) ev->data.l[1],
(gint) ev->data.l[2], (gint) ev->data.l[3]);
}
else if ((ev->message_type == display_info->atoms[GTK_SHOW_WINDOW_MENU]) && (ev->format == 32))
{
TRACE ("client \"%s\" (0x%lx) has received a GTK_SHOW_WINDOW_MENU event", c->name, c->window);
show_window_menu (c, (gint) ev->data.l[1], (gint) ev->data.l[2], Button3, (Time) myDisplayGetCurrentTime (display_info), TRUE);
}
}
else
{
screen_info = myDisplayGetScreenFromWindow (display_info, ev->window);
if (!screen_info)
{
return status;
}
status = EVENT_FILTER_REMOVE;
if ((ev->message_type == display_info->atoms[NET_CURRENT_DESKTOP]) && (ev->format == 32))
{
TRACE ("root has received a win_workspace or a NET_CURRENT_DESKTOP event %li", ev->data.l[0]);
if ((ev->data.l[0] >= 0) && (ev->data.l[0] < (long) screen_info->workspace_count) &&
(ev->data.l[0] != (long) screen_info->current_ws))
{
workspaceSwitch (screen_info, ev->data.l[0], NULL, TRUE,
myDisplayGetTime (display_info, (guint32) ev->data.l[1]));
}
}
else if ((ev->message_type == display_info->atoms[NET_NUMBER_OF_DESKTOPS]) && (ev->format == 32))
{
TRACE ("root has received a win_workspace_count event");
if (ev->data.l[0] != (long) screen_info->workspace_count)
{
workspaceSetCount (screen_info, ev->data.l[0]);
getDesktopLayout(display_info, screen_info->xroot, screen_info->workspace_count, &screen_info->desktop_layout);
}
}
else if ((ev->message_type == display_info->atoms[NET_SHOWING_DESKTOP]) && (ev->format == 32))
{
TRACE ("root has received a NET_SHOWING_DESKTOP event");
screen_info->show_desktop = (ev->data.l[0] != 0);
clientToggleShowDesktop (screen_info);
setHint (display_info, screen_info->xroot, NET_SHOWING_DESKTOP, ev->data.l[0]);
}
else if (ev->message_type == display_info->atoms[NET_REQUEST_FRAME_EXTENTS])
{
TRACE ("window (0x%lx) has received a NET_REQUEST_FRAME_EXTENTS event", ev->window);
/* Size estimate from the decoration extents */
setNetFrameExtents (display_info, ev->window,
frameDecorationTop (screen_info),
frameDecorationLeft (screen_info),
frameDecorationRight (screen_info),
frameDecorationBottom (screen_info));
}
else if ((ev->message_type == display_info->atoms[MANAGER]) && (ev->format == 32))
{
Atom selection;
TRACE ("window (0x%lx) has received a MANAGER event", ev->window);
selection = (Atom) ev->data.l[1];
#ifdef ENABLE_KDE_SYSTRAY_PROXY
if (selection == screen_info->net_system_tray_selection)
{
TRACE ("root has received a NET_SYSTEM_TRAY_MANAGER selection event");
screen_info->systray = getSystrayWindow (display_info, screen_info->net_system_tray_selection);
}
else
#endif
if (myScreenCheckWMAtom (screen_info, selection))
{
TRACE ("root has received a WM_Sn selection event");
display_info->quit = TRUE;
}
}
else if (ev->message_type == display_info->atoms[WM_PROTOCOLS])
{
if ((Atom) ev->data.l[0] == display_info->atoms[NET_WM_PING])
{
TRACE ("root has received a NET_WM_PING (pong) event\n");
clientReceiveNetWMPong (screen_info, (guint32) ev->data.l[1]);
}
}
else if (ev->message_type == display_info->atoms[GTK_READ_RCFILES])
{
TRACE ("window (0x%lx) has received a GTK_READ_RCFILES event", ev->window);
set_reload (display_info);
}
else
{
TRACE ("unidentified client message for window 0x%lx", ev->window);
}
}
return status;
}
static eventFilterStatus
handleSelectionClear (DisplayInfo *display_info, XSelectionClearEvent * ev)
{
eventFilterStatus status;
ScreenInfo *screen_info, *pscreen;
GSList *list;
DBG ("entering handleSelectionClear 0x%lx", ev->window);
status = EVENT_FILTER_PASS;
screen_info = NULL;
for (list = display_info->screens; list; list = g_slist_next (list))
{
pscreen = (ScreenInfo *) list->data;
if (ev->window == pscreen->xfwm4_win)
{
screen_info = pscreen;
break;
}
}
if (screen_info)
{
if (myScreenCheckWMAtom (screen_info, ev->selection))
{
TRACE ("root has received a WM_Sn selection event");
display_info->quit = TRUE;
status = EVENT_FILTER_REMOVE;
}
}
return status;
}
static eventFilterStatus
handleShape (DisplayInfo *display_info, XShapeEvent * ev)
{
Client *c;
gboolean update;
TRACE ("entering");
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW);
if (c)
{
update = FALSE;
if (ev->kind == ShapeInput)
{
frameSetShapeInput (c);
update = TRUE;
}
else if (ev->kind == ShapeBounding)
{
if ((ev->shaped) && !FLAG_TEST (c->flags, CLIENT_FLAG_HAS_SHAPE))
{
update = TRUE;
FLAG_SET (c->flags, CLIENT_FLAG_HAS_SHAPE);
clientGetMWMHints (c);
clientApplyMWMHints (c, TRUE);
}
else if (!(ev->shaped) && FLAG_TEST (c->flags, CLIENT_FLAG_HAS_SHAPE))
{
update = TRUE;
FLAG_UNSET (c->flags, CLIENT_FLAG_HAS_SHAPE);
clientGetMWMHints (c);
clientApplyMWMHints (c, TRUE);
}
}
if (!update)
{
frameQueueDraw (c, FALSE);
}
}
return EVENT_FILTER_REMOVE;
}
static eventFilterStatus
handleColormapNotify (DisplayInfo *display_info, XColormapEvent * ev)
{
Client *c;
TRACE ("entering");
c = myDisplayGetClientFromWindow (display_info, ev->window, SEARCH_WINDOW);
if ((c) && (ev->window == c->window) && (ev->new))
{
if (c == clientGetFocus ())
{
clientInstallColormaps (c);
}
return EVENT_FILTER_REMOVE;
}
return EVENT_FILTER_PASS;
}
static eventFilterStatus
handleReparentNotify (DisplayInfo *display_info, XReparentEvent * ev)
{
TRACE ("window 0x%lx reparented in 0x%lx", ev->window, ev->parent);
return EVENT_FILTER_PASS;
}
#ifdef HAVE_XSYNC
static eventFilterStatus
handleXSyncAlarmNotify (DisplayInfo *display_info, XSyncAlarmNotifyEvent * ev)
{
Client *c;
TRACE ("entering");
if (display_info->have_xsync)
{
c = myDisplayGetClientFromXSyncAlarm (display_info, ev->alarm);
if (c)
{
clientXSyncUpdateValue (c, ev->counter_value);
}
}
return EVENT_FILTER_REMOVE;
}
#endif /* HAVE_XSYNC */
static eventFilterStatus
handleEvent (DisplayInfo *display_info, XfwmEvent *event)
{
eventFilterStatus status;
status = EVENT_FILTER_PASS;
TRACE ("entering");
/* Update the display time */
myDisplayUpdateCurrentTime (display_info, event);
sn_process_event (event->meta.xevent);
switch (event->meta.type)
{
case XFWM_EVENT_KEY:
if (event->key.pressed)
{
status = handleKeyPress (display_info, &event->key);
}
else
{
status = handleKeyRelease (display_info, &event->key);
}
break;
case XFWM_EVENT_BUTTON:
if (event->button.pressed)
{
status = handleButtonPress (display_info, &event->button);
}
else
{
status = handleButtonRelease (display_info, &event->button);
}
break;
case XFWM_EVENT_MOTION:
status = handleMotionNotify (display_info, &event->motion);
break;
case XFWM_EVENT_CROSSING:
if (event->crossing.enter)
{
status = handleEnterNotify (display_info, &event->crossing);
}
else
{
status = handleLeaveNotify (display_info, &event->crossing);
}
break;
case XFWM_EVENT_XEVENT:
switch (event->meta.xevent->type)
{
case DestroyNotify:
status = handleDestroyNotify (display_info, (XDestroyWindowEvent *) event->meta.xevent);
break;
case UnmapNotify:
status = handleUnmapNotify (display_info, (XUnmapEvent *) event->meta.xevent);
break;
case MapRequest:
status = handleMapRequest (display_info, (XMapRequestEvent *) event->meta.xevent);
break;
case MapNotify:
status = handleMapNotify (display_info, (XMapEvent *) event->meta.xevent);
break;
case ConfigureNotify:
status = handleConfigureNotify (display_info, (XConfigureEvent *) event->meta.xevent);
break;
case ConfigureRequest:
status = handleConfigureRequest (display_info, (XConfigureRequestEvent *) event->meta.xevent);
break;
case FocusIn:
status = handleFocusIn (display_info, (XFocusChangeEvent *) event->meta.xevent);
break;
case FocusOut:
status = handleFocusOut (display_info, (XFocusChangeEvent *) event->meta.xevent);
break;
case PropertyNotify:
status = handlePropertyNotify (display_info, (XPropertyEvent *) event->meta.xevent);
break;
case ClientMessage:
status = handleClientMessage (display_info, (XClientMessageEvent *) event->meta.xevent);
break;
case SelectionClear:
status = handleSelectionClear (display_info, (XSelectionClearEvent *) event->meta.xevent);
break;
case ColormapNotify:
handleColormapNotify (display_info, (XColormapEvent *) event->meta.xevent);
break;
case ReparentNotify:
status = handleReparentNotify (display_info, (XReparentEvent *) event->meta.xevent);
break;
default:
if ((display_info->have_shape) &&
(event->meta.xevent->type == display_info->shape_event_base))
{
status = handleShape (display_info, (XShapeEvent *) event->meta.xevent);
}
#ifdef HAVE_XSYNC
if ((display_info->have_xsync) &&
(event->meta.xevent->type == (display_info->xsync_event_base + XSyncAlarmNotify)))
{
status = handleXSyncAlarmNotify (display_info, (XSyncAlarmNotifyEvent *) event->meta.xevent);
}
#endif /* HAVE_XSYNC */
break;
}
break;
}
if (!gdk_events_pending () && !XPending (display_info->dpy))
{
if (display_info->reload)
{
reloadSettings (display_info, UPDATE_ALL);
display_info->reload = FALSE;
}
else if (display_info->quit)
{
/*
             * Quitting on purpose, update session manager so
* it does not restart the program immediately
*/
xfce_sm_client_set_restart_style(display_info->session, XFCE_SM_CLIENT_RESTART_NORMAL);
gtk_main_quit ();
}
}
compositorHandleEvent (display_info, event->meta.xevent);
return status;
}
eventFilterStatus
xfwm4_event_filter (XfwmEvent *event, gpointer data)
{
eventFilterStatus status;
DisplayInfo *display_info;
display_info = (DisplayInfo *) data;
TRACE ("entering");
status = handleEvent (display_info, event);
TRACE ("leaving");
return EVENT_FILTER_STOP | status;
}
/* GTK specific stuff */
static void
menu_callback (Menu * menu, MenuOp op, Window xid, gpointer menu_data, gpointer item_data)
{
Client *c;
TRACE ("entering");
if (!xfwmWindowDeleted(&menu_event_window))
{
xfwmWindowDelete (&menu_event_window);
}
c = NULL;
if ((menu_data != NULL) && (xid != None))
{
ScreenInfo *screen_info = (ScreenInfo *) menu_data;
c = myScreenGetClientFromWindow (screen_info, xid, SEARCH_WINDOW);
}
if (c)
{
c->button_status[MENU_BUTTON] = BUTTON_STATE_NORMAL;
switch (op)
{
case MENU_OP_QUIT:
gtk_main_quit ();
break;
case MENU_OP_MAXIMIZE:
case MENU_OP_UNMAXIMIZE:
if (CLIENT_CAN_MAXIMIZE_WINDOW (c))
{
clientToggleMaximized (c, CLIENT_FLAG_MAXIMIZED, TRUE);
}
break;
case MENU_OP_MINIMIZE:
if (CLIENT_CAN_HIDE_WINDOW (c))
{
clientWithdraw (c, c->win_workspace, TRUE);
}
break;
case MENU_OP_MOVE:
clientMove (c, NULL);
break;
case MENU_OP_RESIZE:
clientResize (c, CORNER_BOTTOM_RIGHT, NULL);
break;
case MENU_OP_MINIMIZE_ALL:
clientWithdrawAll (c, c->win_workspace);
break;
case MENU_OP_UNMINIMIZE:
if (FLAG_TEST (c->xfwm_flags, XFWM_FLAG_WAS_SHOWN))
{
clientClearAllShowDesktop (c->screen_info);
}
clientShow (c, TRUE);
break;
case MENU_OP_SHADE:
case MENU_OP_UNSHADE:
clientToggleShaded (c);
break;
case MENU_OP_STICK:
clientToggleSticky (c, TRUE);
frameQueueDraw (c, FALSE);
break;
case MENU_OP_WORKSPACES:
clientSetWorkspace (c, GPOINTER_TO_INT (item_data), TRUE);
break;
case MENU_OP_DELETE:
clientClose (c);
break;
case MENU_OP_CONTEXT_HELP:
clientEnterContextMenuState (c);
break;
case MENU_OP_ABOVE:
clientToggleLayerAbove (c);
break;
case MENU_OP_NORMAL:
clientSetLayerNormal (c);
break;
case MENU_OP_BELOW:
clientToggleLayerBelow (c);
break;
case MENU_OP_FULLSCREEN:
case MENU_OP_UNFULLSCREEN:
clientToggleFullscreen (c);
break;
default:
frameQueueDraw (c, FALSE);
break;
}
}
else
{
gdk_display_beep (gdk_display_get_default ());
}
menu_free (menu);
}
void
initMenuEventWin (void)
{
xfwmWindowInit (&menu_event_window);
}
static void
show_window_menu (Client *c, gint px, gint py, guint button, guint32 timestamp, gboolean needscale)
{
ScreenInfo *screen_info;
DisplayInfo *display_info;
Menu *menu;
MenuOp ops;
MenuOp insensitive;
gboolean is_transient;
gint x, y;
gint scale = 1;
TRACE ("coords (%d,%d)", px, py);
if ((button != Button1) && (button != Button3))
{
return;
}
if (!c || !FLAG_TEST (c->xfwm_flags, XFWM_FLAG_VISIBLE))
{
return;
}
screen_info = c->screen_info;
display_info = screen_info->display_info;
is_transient = clientIsValidTransientOrModal (c);
scale = gdk_window_get_scale_factor (gdk_screen_get_root_window (screen_info->gscr));
x = px;
y = py;
if (needscale) {
x /= scale;
y /= scale;
}
c->button_status[MENU_BUTTON] = BUTTON_STATE_PRESSED;
frameQueueDraw (c, FALSE);
if (CLIENT_HAS_FRAME (c))
{
x = px;
y = c->y / scale;
if (needscale) {
x /= scale;
}
}
ops = MENU_OP_DELETE | MENU_OP_MINIMIZE_ALL | MENU_OP_WORKSPACES | MENU_OP_MOVE | MENU_OP_RESIZE;
insensitive = 0;
if (FLAG_TEST (c->flags, CLIENT_FLAG_MAXIMIZED))
{
ops |= MENU_OP_UNMAXIMIZE;
}
else
{
ops |= MENU_OP_MAXIMIZE;
}
if (!FLAG_TEST (c->xfwm_flags, XFWM_FLAG_HAS_MOVE))
{
insensitive |= MENU_OP_MOVE;
}
if (FLAG_TEST (c->flags, CLIENT_FLAG_ICONIFIED))
{
ops |= MENU_OP_UNMINIMIZE;
}
else
{
ops |= MENU_OP_MINIMIZE;
}
if (FLAG_TEST (c->flags, CLIENT_FLAG_SHADED))
{
ops |= MENU_OP_UNSHADE;
}
else
{
ops |= MENU_OP_SHADE;
}
if (!FLAG_TEST (c->flags, CLIENT_FLAG_STICKY))
{
ops |= MENU_OP_STICK;
}
if (!FLAG_TEST (c->xfwm_flags, XFWM_FLAG_HAS_BORDER))
{
insensitive |= MENU_OP_SHADE | MENU_OP_UNSHADE;
}
if (!FLAG_TEST (c->xfwm_flags, XFWM_FLAG_HAS_CLOSE))
{
insensitive |= MENU_OP_DELETE;
}
if (is_transient || !FLAG_TEST(c->xfwm_flags, XFWM_FLAG_HAS_STICK))
{
insensitive |= MENU_OP_STICK;
}
if (!CLIENT_CAN_HIDE_WINDOW (c))
{
insensitive |= MENU_OP_MINIMIZE;
}
if (!CLIENT_CAN_MAXIMIZE_WINDOW (c))
{
insensitive |= MENU_OP_MAXIMIZE;
}
if (!FLAG_TEST (c->xfwm_flags, XFWM_FLAG_HAS_MOVE))
{
insensitive |= MENU_OP_MOVE;
}
if (!FLAG_TEST_ALL (c->xfwm_flags, XFWM_FLAG_HAS_RESIZE | XFWM_FLAG_IS_RESIZABLE) ||
FLAG_TEST_ALL (c->flags, CLIENT_FLAG_MAXIMIZED))
{
insensitive |= MENU_OP_RESIZE;
}
if (FLAG_TEST (c->flags, CLIENT_FLAG_FULLSCREEN))
{
insensitive |= MENU_OP_SHADE | MENU_OP_MOVE | MENU_OP_RESIZE | MENU_OP_MAXIMIZE | MENU_OP_UNMAXIMIZE;
}
if (FLAG_TEST(c->flags, CLIENT_FLAG_FULLSCREEN))
{
ops |= MENU_OP_UNFULLSCREEN;
}
else
{
ops |= MENU_OP_FULLSCREEN;
}
if (is_transient || (c->type != WINDOW_NORMAL))
{
insensitive |= MENU_OP_FULLSCREEN | MENU_OP_UNFULLSCREEN;
}
if (FLAG_TEST(c->flags, CLIENT_FLAG_ABOVE))
{
ops |= MENU_OP_NORMAL | MENU_OP_BELOW;
}
else if (FLAG_TEST(c->flags, CLIENT_FLAG_BELOW))
{
ops |= MENU_OP_NORMAL | MENU_OP_ABOVE;
}
else
{
ops |= MENU_OP_ABOVE | MENU_OP_BELOW;
}
if (is_transient ||
!(c->type & WINDOW_REGULAR_FOCUSABLE) ||
FLAG_TEST (c->flags, CLIENT_FLAG_FULLSCREEN))
{
insensitive |= MENU_OP_NORMAL | MENU_OP_ABOVE | MENU_OP_BELOW;
}
/* KDE extension */
clientGetWMProtocols(c);
if (FLAG_TEST (c->wm_flags, WM_FLAG_CONTEXT_HELP))
{
ops |= MENU_OP_CONTEXT_HELP;
}
if (is_transient
|| !FLAG_TEST (c->xfwm_flags, XFWM_FLAG_HAS_STICK)
|| FLAG_TEST (c->flags, CLIENT_FLAG_STICKY))
{
insensitive |= MENU_OP_WORKSPACES;
}
if (screen_info->button_handler_id)
{
g_signal_handler_disconnect (G_OBJECT (myScreenGetGtkWidget (screen_info)), screen_info->button_handler_id);
}
screen_info->button_handler_id = g_signal_connect (G_OBJECT (myScreenGetGtkWidget (screen_info)),
"button_press_event", G_CALLBACK (show_popup_cb), (gpointer) NULL);
if (!xfwmWindowDeleted(&menu_event_window))
{
xfwmWindowDelete (&menu_event_window);
}
    /*
       Since all button press/release events are caught by the window frames, there is a
       side effect with GTK menus. When a menu is opened, any click on the window frame is not
       detected as a click outside the menu, and the menu doesn't close.
       To avoid this (harmless but annoying) behavior, we just set up a no-event window that
       "hides" the events to regular windows.
       That might look tricky, but it's very efficient and saves plenty of lines of complicated
       code.
       Don't forget to delete that window once the menu is closed, though, or we'll get in
       trouble.
     */
xfwmWindowTemp (screen_info,
NULL, 0,
screen_info->xroot,
&menu_event_window, 0, 0,
screen_info->width,
screen_info->height,
NoEventMask,
FALSE);
menu = menu_default (screen_info->gscr, c->window, ops, insensitive, menu_callback,
c->win_workspace, screen_info->workspace_count,
screen_info->workspace_names, screen_info->workspace_names_items,
display_info->xfilter, screen_info);
if (!menu_popup (menu, x, y, button, timestamp))
{
TRACE ("cannot open menu");
gdk_display_beep (display_info->gdisplay);
c->button_status[MENU_BUTTON] = BUTTON_STATE_NORMAL;
frameQueueDraw (c, FALSE);
xfwmWindowDelete (&menu_event_window);
menu_free (menu);
}
}
static gboolean
show_popup_cb (GtkWidget * widget, GdkEventButton * ev, gpointer data)
{
TRACE ("entering");
show_window_menu ((Client *) data, (gint) ev->x_root, (gint) ev->y_root, ev->button, ev->time, FALSE);
return TRUE;
}
static gboolean
set_reload (DisplayInfo *display_info)
{
TRACE ("setting reload flag so all prefs will be reread at next event loop");
display_info->reload = TRUE;
return TRUE;
}
static gboolean
double_click_time_cb (GObject * obj, GdkEvent * ev, gpointer data)
{
DisplayInfo *display_info;
GValue tmp_val = { 0, };
display_info = (DisplayInfo *) data;
g_return_val_if_fail (display_info, TRUE);
g_value_init (&tmp_val, G_TYPE_INT);
if (gdk_setting_get ("gtk-double-click-time", &tmp_val))
{
display_info->double_click_time = abs (g_value_get_int (&tmp_val));
}
return TRUE;
}
static gboolean
double_click_distance_cb (GObject * obj, GdkEvent * ev, gpointer data)
{
DisplayInfo *display_info;
GValue tmp_val = { 0, };
display_info = (DisplayInfo *) data;
g_return_val_if_fail (display_info, TRUE);
g_value_init (&tmp_val, G_TYPE_INT);
if (gdk_setting_get ("gtk-double-click-distance", &tmp_val))
{
display_info->double_click_distance = abs (g_value_get_int (&tmp_val));
}
return TRUE;
}
static void
cursor_theme_cb (GObject * obj, GParamSpec * pspec, gpointer data)
{
DisplayInfo * display_info;
GSList *list;
display_info = (DisplayInfo *) data;
g_return_if_fail (display_info);
myDisplayFreeCursor (display_info);
myDisplayCreateCursor (display_info);
for (list = display_info->screens; list; list = g_slist_next (list))
{
ScreenInfo *screen_info = (ScreenInfo *) list->data;
clientUpdateAllCursor (screen_info);
XDefineCursor (display_info->dpy, screen_info->xroot, display_info->root_cursor);
}
}
static void
update_screen_font (ScreenInfo *screen_info)
{
myScreenUpdateFontAttr (screen_info);
clientUpdateAllFrames (screen_info, UPDATE_FRAME);
}
static gboolean
refresh_font_cb (GObject * obj, GdkEvent * ev, gpointer data)
{
DisplayInfo * display_info;
GSList *list;
display_info = (DisplayInfo *) data;
g_return_val_if_fail (display_info, TRUE);
for (list = display_info->screens; list; list = g_slist_next (list))
{
update_screen_font (list->data);
}
return TRUE;
}
/*
* The size-changed signal is emitted when the pixel width or height
* of a screen changes.
*/
static void
size_changed_cb(GdkScreen *gscreen, gpointer data)
{
ScreenInfo *screen_info;
DisplayInfo *display_info;
gboolean size_changed;
TRACE ("entering");
screen_info = (ScreenInfo *) data;
g_return_if_fail (screen_info);
display_info = screen_info->display_info;
if (xfwm_get_n_monitors (screen_info->gscr) == 0)
{
/*
         * Recent Xorg drivers disable the output when the lid
         * is closed, leaving no active monitor; in that case simply
         * ignore the event to avoid messing with windows' positions.
*/
return;
}
size_changed = myScreenComputeSize (screen_info);
if (size_changed)
{
myScreenInvalidateMonitorCache (screen_info);
setNetWorkarea (display_info, screen_info->xroot, screen_info->workspace_count,
screen_info->width, screen_info->height, screen_info->margins);
setNetDesktopInfo (display_info, screen_info->xroot, screen_info->current_ws,
screen_info->width, screen_info->height);
placeSidewalks (screen_info, screen_info->params->wrap_workspaces);
compositorUpdateScreenSize (screen_info);
}
clientScreenResize (screen_info, FALSE);
}
/*
* The monitors-changed signal is emitted when the number, size or
* position of the monitors attached to the screen change.
*/
static void
monitors_changed_cb(GdkScreen *gscreen, gpointer data)
{
ScreenInfo *screen_info;
DisplayInfo *display_info;
gint previous_num_monitors;
gboolean size_changed;
TRACE ("entering");
screen_info = (ScreenInfo *) data;
g_return_if_fail (screen_info);
display_info = screen_info->display_info;
if (xfwm_get_n_monitors (screen_info->gscr) == 0)
{
/*
         * Recent Xorg drivers disable the output when the lid
         * is closed, leaving no active monitor; in that case simply
         * ignore the event to avoid messing with windows' positions.
*/
return;
}
/*
     * We have added/removed a monitor or even changed the layout;
     * the cache for monitor positions we use in our screen structure
* is not valid anymore and potentially refers to a monitor that
* was just removed, so invalidate it.
*/
previous_num_monitors = screen_info->num_monitors;
myScreenInvalidateMonitorCache (screen_info);
myScreenRebuildMonitorIndex (screen_info);
size_changed = myScreenComputeSize (screen_info);
if (size_changed || (screen_info->num_monitors != previous_num_monitors))
{
setNetWorkarea (display_info, screen_info->xroot, screen_info->workspace_count,
screen_info->width, screen_info->height, screen_info->margins);
setNetDesktopInfo (display_info, screen_info->xroot, screen_info->current_ws,
screen_info->width, screen_info->height);
placeSidewalks (screen_info, screen_info->params->wrap_workspaces);
}
if (size_changed)
{
compositorUpdateScreenSize (screen_info);
}
clientScreenResize (screen_info, (screen_info->num_monitors < previous_num_monitors));
}
void
initPerScreenCallbacks (ScreenInfo *screen_info)
{
g_return_if_fail (screen_info);
screen_info->button_handler_id =
g_signal_connect (G_OBJECT (myScreenGetGtkWidget (screen_info)),
"button_press_event", G_CALLBACK (show_popup_cb), (gpointer) NULL);
g_object_connect (G_OBJECT(screen_info->gscr),
"signal::size-changed",
G_CALLBACK(size_changed_cb), (gpointer) (screen_info),
"signal::monitors-changed",
G_CALLBACK(monitors_changed_cb), (gpointer) (screen_info),
NULL);
g_signal_connect_swapped (G_OBJECT (myScreenGetGtkWidget (screen_info)),
"notify::scale-factor",
G_CALLBACK (update_screen_font),
screen_info);
}
void
initPerDisplayCallbacks (DisplayInfo *display_info)
{
GtkSettings *settings;
g_return_if_fail (display_info);
settings = gtk_settings_get_default ();
g_object_connect (settings,
"swapped-signal::notify::gtk-theme-name",
G_CALLBACK (set_reload), (gpointer) (display_info),
"swapped-signal::notify::gtk-font-name",
G_CALLBACK (set_reload), (gpointer) (display_info),
"signal::notify::gtk-double-click-time",
G_CALLBACK (double_click_time_cb), (gpointer) (display_info),
"signal::notify::gtk-double-click-distance",
G_CALLBACK (double_click_distance_cb), (gpointer) (display_info),
"signal::notify::gtk-cursor-theme-name",
G_CALLBACK (cursor_theme_cb), (gpointer) (display_info),
"signal::notify::gtk-cursor-theme-size",
G_CALLBACK (cursor_theme_cb), (gpointer) (display_info),
"signal::notify::gtk-xft-antialias",
G_CALLBACK (refresh_font_cb), (gpointer) (display_info),
"signal::notify::gtk-xft-dpi",
G_CALLBACK (refresh_font_cb), (gpointer) (display_info),
"signal::notify::gtk-xft-hinting",
G_CALLBACK (refresh_font_cb), (gpointer) (display_info),
"signal::notify::gtk-xft-hintstyle",
G_CALLBACK (refresh_font_cb), (gpointer) (display_info),
"signal::notify::gtk-xft-rgba",
G_CALLBACK (refresh_font_cb), (gpointer) (display_info),
NULL);
}
| xfce-mirror/xfwm4 | src/events.c | C | gpl-2.0 | 95,564 |
/**********************************************************************
Freeciv - Copyright (C) 1996 - A Kjeldberg, L Gregersen, P Unold
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
***********************************************************************/
#ifdef HAVE_CONFIG_H
#include <fc_config.h>
#endif
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* utility */
#include "bitvector.h"
#include "fcintl.h"
#include "log.h"
#include "mem.h"
#include "registry.h"
#include "shared.h"
#include "string_vector.h"
#include "support.h"
/* common */
#include "ai.h"
#include "base.h"
#include "capability.h"
#include "city.h"
#include "effects.h"
#include "fc_types.h"
#include "game.h"
#include "government.h"
#include "map.h"
#include "movement.h"
#include "name_translation.h"
#include "nation.h"
#include "packets.h"
#include "player.h"
#include "requirements.h"
#include "rgbcolor.h"
#include "specialist.h"
#include "tech.h"
#include "unit.h"
#include "unittype.h"
/* server */
#include "citytools.h"
#include "plrhand.h"
#include "settings.h"
#include "srv_main.h"
/* server/advisors */
#include "advruleset.h"
/* server/scripting */
#include "script_server.h"
#include "ruleset.h"
#define RULESET_CAPABILITIES "+Freeciv-2.4-ruleset"
/*
* Ruleset capabilities acceptable to this program:
*
 * +Freeciv-2.4-ruleset
 * - basic ruleset format for Freeciv versions 2.4.x; required
 *
 * +Freeciv-tilespec-Devel-YYYY.MMM.DD
 * - ruleset of the development version at the given date
*/
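/*
 * Illustrative example (hypothetical file contents): the matching entry in
 * a ruleset file, e.g. data/default/techs.ruleset, looks roughly like
 *
 *   [datafile]
 *   description = "..."
 *   options     = "+Freeciv-2.4-ruleset"
 *
 * check_ruleset_capabilities() below compares that options string against
 * RULESET_CAPABILITIES in both directions.
 */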
/* RULESET_SUFFIX already used, no leading dot here */
#define RULES_SUFFIX "ruleset"
#define SCRIPT_SUFFIX "lua"
#define ADVANCE_SECTION_PREFIX "advance_"
#define BUILDING_SECTION_PREFIX "building_"
#define CITYSTYLE_SECTION_PREFIX "citystyle_"
#define EFFECT_SECTION_PREFIX "effect_"
#define GOVERNMENT_SECTION_PREFIX "government_"
#define NATION_GROUP_SECTION_PREFIX "ngroup" /* without underscore? */
#define NATION_SET_SECTION_PREFIX "nset" /* without underscore? */
#define NATION_SECTION_PREFIX "nation" /* without underscore? */
#define RESOURCE_SECTION_PREFIX "resource_"
#define BASE_SECTION_PREFIX "base_"
#define SPECIALIST_SECTION_PREFIX "specialist_"
#define TERRAIN_SECTION_PREFIX "terrain_"
#define UNIT_CLASS_SECTION_PREFIX "unitclass_"
#define UNIT_SECTION_PREFIX "unit_"
#define check_name(name) (check_strlen(name, MAX_LEN_NAME, NULL))
/* avoid re-reading files */
static const char name_too_long[] = "Name \"%s\" too long; truncating.";
#define MAX_SECTION_LABEL 64
#define section_strlcpy(dst, src) \
(void) loud_strlcpy(dst, src, MAX_SECTION_LABEL, name_too_long)
static char *resource_sections = NULL;
static char *terrain_sections = NULL;
static char *base_sections = NULL;
static struct section_file *openload_ruleset_file(const char *whichset);
static const char *check_ruleset_capabilities(struct section_file *file,
const char *us_capstr,
const char *filename);
static void load_tech_names(struct section_file *file);
static void load_unit_names(struct section_file *file);
static void load_building_names(struct section_file *file);
static void load_government_names(struct section_file *file);
static void load_terrain_names(struct section_file *file);
static void load_citystyle_names(struct section_file *file);
static void load_nation_names(struct section_file *file);
static void load_city_name_list(struct section_file *file,
struct nation_type *pnation,
const char *secfile_str1,
const char *secfile_str2);
static void load_ruleset_techs(struct section_file *file);
static void load_ruleset_units(struct section_file *file);
static void load_ruleset_buildings(struct section_file *file);
static void load_ruleset_governments(struct section_file *file);
static void load_ruleset_terrain(struct section_file *file);
static void load_ruleset_cities(struct section_file *file);
static void load_ruleset_effects(struct section_file *file);
static void load_ruleset_game(void);
static void send_ruleset_techs(struct conn_list *dest);
static void send_ruleset_unit_classes(struct conn_list *dest);
static void send_ruleset_units(struct conn_list *dest);
static void send_ruleset_buildings(struct conn_list *dest);
static void send_ruleset_terrain(struct conn_list *dest);
static void send_ruleset_resources(struct conn_list *dest);
static void send_ruleset_bases(struct conn_list *dest);
static void send_ruleset_governments(struct conn_list *dest);
static void send_ruleset_cities(struct conn_list *dest);
static void send_ruleset_game(struct conn_list *dest);
static void send_ruleset_team_names(struct conn_list *dest);
static bool load_ruleset_veteran(struct section_file *file,
const char *path,
struct veteran_system **vsystem, char *err,
size_t err_len);
static bool nation_has_initial_tech(struct nation_type *pnation,
struct advance *tech);
static bool sanity_check_ruleset_data(void);
static void ruleset_error_real(const char *file, const char *function,
int line, enum log_level level,
const char *format, ...)
fc__attribute((__format__ (__printf__, 5, 6)));
#define ruleset_error(level, format, ...) \
if (log_do_output_for_level(level)) { \
ruleset_error_real(__FILE__, __FUNCTION__, __FC_LINE__, \
level, format, ## __VA_ARGS__); \
}
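/*
 * Usage sketch, shown only for illustration (it mirrors the calls made
 * later in this file):
 *
 *   ruleset_error(LOG_FATAL, "\"%s\": could not load ruleset script.",
 *                 dfilename);
 */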
/**************************************************************************
  Notifications about ruleset errors to clients. Especially important in
  case of an internal server crash.
**************************************************************************/
static void ruleset_error_real(const char *file, const char *function,
int line, enum log_level level,
const char *format, ...)
{
va_list args;
va_start(args, format);
vdo_log(file, function, line, FALSE, level, format, args);
va_end(args);
if (LOG_FATAL >= level) {
exit(EXIT_FAILURE);
}
}
/**************************************************************************
  datafilename() wrapper: tries to match in three ways.
Returns NULL on failure, the (statically allocated) filename on success.
**************************************************************************/
static const char *valid_ruleset_filename(const char *subdir,
const char *name,
const char *extension)
{
char filename[512];
const char *dfilename;
fc_assert_ret_val(subdir && name && extension, NULL);
fc_snprintf(filename, sizeof(filename), "%s/%s.%s",
subdir, name, extension);
log_verbose("Trying \"%s\".", filename);
dfilename = fileinfoname(get_data_dirs(), filename);
if (dfilename) {
return dfilename;
}
fc_snprintf(filename, sizeof(filename), "default/%s.%s", name, extension);
log_verbose("Trying \"%s\": default ruleset directory.", filename);
dfilename = fileinfoname(get_data_dirs(), filename);
if (dfilename) {
return dfilename;
}
fc_snprintf(filename, sizeof(filename), "%s_%s.%s",
subdir, name, extension);
log_verbose("Trying \"%s\": alternative ruleset filename syntax.",
filename);
dfilename = fileinfoname(get_data_dirs(), filename);
if (dfilename) {
return dfilename;
} else {
ruleset_error(LOG_FATAL,
/* TRANS: message about an installation error. */
_("Could not find a readable \"%s.%s\" ruleset file."),
name, extension);
}
return(NULL);
}
/**************************************************************************
Do initial section_file_load on a ruleset file.
"whichset" = "techs", "units", "buildings", "terrain", ...
**************************************************************************/
static struct section_file *openload_ruleset_file(const char *whichset)
{
char sfilename[512];
const char *dfilename = valid_ruleset_filename(game.server.rulesetdir,
whichset, RULES_SUFFIX);
struct section_file *secfile;
  /* Need to save a copy of the filename for the following message, since
     section_file_load() may call datafilename() for includes. */
sz_strlcpy(sfilename, dfilename);
if (!(secfile = secfile_load(sfilename, FALSE))) {
ruleset_error(LOG_FATAL, "Could not load ruleset '%s':\n%s",
sfilename, secfile_error());
}
return secfile;
}
/**************************************************************************
Parse script file.
**************************************************************************/
static void openload_script_file(const char *whichset)
{
const char *dfilename = valid_ruleset_filename(game.server.rulesetdir,
whichset, SCRIPT_SUFFIX);
if (!script_server_do_file(NULL, dfilename)) {
ruleset_error(LOG_FATAL, "\"%s\": could not load ruleset script.",
dfilename);
}
}
/**************************************************************************
  Ruleset files should have a capabilities string "datafile.options".
  This gets and returns that string, and checks that the required
  capabilities specified are satisfied.
**************************************************************************/
static const char *check_ruleset_capabilities(struct section_file *file,
const char *us_capstr,
const char *filename)
{
const char *datafile_options;
if (!(datafile_options = secfile_lookup_str(file, "datafile.options"))) {
log_fatal("\"%s\": ruleset capability problem:", filename);
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
if (!has_capabilities(us_capstr, datafile_options)) {
log_fatal("\"%s\": ruleset datafile appears incompatible:", filename);
log_fatal(" datafile options: %s", datafile_options);
log_fatal(" supported options: %s", us_capstr);
ruleset_error(LOG_FATAL, "Capability problem");
}
if (!has_capabilities(datafile_options, us_capstr)) {
log_fatal("\"%s\": ruleset datafile claims required option(s)"
" that we don't support:", filename);
log_fatal(" datafile options: %s", datafile_options);
log_fatal(" supported options: %s", us_capstr);
ruleset_error(LOG_FATAL, "Capability problem");
}
return datafile_options;
}
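/* In the ruleset file this corresponds to something like the following; the
 * capability string is a placeholder, not the real one of any particular
 * format version:
 *
 *   [datafile]
 *   options = "+Example-capability"
 *
 * The check above is mutual: the server must support every capability the
 * file requires, and the file must announce every capability the server
 * needs. */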
/**************************************************************************
Load a requirement list. The list is returned as a static vector
(callers need not worry about freeing anything).
**************************************************************************/
static struct requirement_vector *lookup_req_list(struct section_file *file,
const char *sec,
const char *sub,
const char *rfor)
{
const char *type, *name;
int j;
const char *filename;
static struct requirement_vector list;
filename = secfile_name(file);
requirement_vector_reserve(&list, 0);
for (j = 0; (type = secfile_lookup_str_default(file, NULL, "%s.%s%d.type",
sec, sub, j)); j++) {
char buf[MAX_LEN_NAME];
const char *range;
bool survives, negated;
struct entry *pentry;
struct requirement req;
if (!(pentry = secfile_entry_lookup(file, "%s.%s%d.name",
sec, sub, j))) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
name = NULL;
switch (entry_type(pentry)) {
case ENTRY_BOOL:
{
bool val;
if (entry_bool_get(pentry, &val)) {
fc_snprintf(buf, sizeof(buf), "%d", val);
name = buf;
}
}
break;
case ENTRY_INT:
{
int val;
if (entry_int_get(pentry, &val)) {
fc_snprintf(buf, sizeof(buf), "%d", val);
name = buf;
}
}
break;
case ENTRY_STR:
(void) entry_str_get(pentry, &name);
break;
}
if (NULL == name) {
ruleset_error(LOG_FATAL,
"\"%s\": error in handling requirement name for '%s.%s%d'.",
filename, sec, sub, j);
}
if (!(range = secfile_lookup_str(file, "%s.%s%d.range", sec, sub, j))) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
survives = secfile_lookup_bool_default(file, FALSE,
"%s.%s%d.survives", sec, sub, j);
negated = secfile_lookup_bool_default(file, FALSE,
"%s.%s%d.negated", sec, sub, j);
req = req_from_str(type, range, survives, negated, name);
if (req.source.kind == universals_n_invalid()) {
ruleset_error(LOG_FATAL, "\"%s\" [%s] has unknown req: \"%s\" \"%s\".",
filename, sec, type, name);
}
requirement_vector_append(&list, req);
}
if (j > MAX_NUM_REQS) {
ruleset_error(LOG_FATAL, "Too many (%d) requirements for %s. Max is %d",
j, rfor, MAX_NUM_REQS);
}
return &list;
}
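/* Illustrative requirement list as lookup_req_list() reads it, assuming the
 * usual spec-file table syntax; the section and values are invented:
 *
 *   [building_example]
 *   reqs =
 *     { "type", "name", "range", "survives", "negated"
 *       "Tech", "Example Advance", "Player", FALSE, FALSE
 *     }
 *
 * Each table row expands to reqs0.type, reqs0.name, reqs0.range, ..., which
 * is exactly what the "%s.%s%d.*" lookups above consume. */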
/**************************************************************************
Lookup a string prefix.entry in the file and return the corresponding
  advance pointer. If loglevel is not LOG_FATAL, return A_NEVER for a
  "Never" entry or a failed match; with LOG_FATAL, die when the entry
  cannot be matched. Note the first tech
should have name "None" so that will always match.
If description is not NULL, it is used in the warning message
instead of prefix (eg pass unit->name instead of prefix="units2.u27")
**************************************************************************/
static struct advance *lookup_tech(struct section_file *file,
const char *prefix, const char *entry,
int loglevel, const char *filename,
const char *description)
{
const char *sval;
struct advance *padvance;
sval = secfile_lookup_str_default(file, NULL, "%s.%s", prefix, entry);
if (!sval || (LOG_FATAL < loglevel && strcmp(sval, "Never") == 0)) {
padvance = A_NEVER;
} else {
padvance = advance_by_rule_name(sval);
if (A_NEVER == padvance) {
ruleset_error(loglevel,
"\"%s\" %s %s: couldn't match \"%s\".",
filename, (description ? description : prefix), entry, sval);
/* ruleset_error returned only if error was not fatal. */
}
}
return padvance;
}
/**************************************************************************
Lookup a string prefix.entry in the file and return the corresponding
  improvement pointer. If loglevel is not LOG_FATAL, return B_NEVER for a
  "None" entry or a failed match; with LOG_FATAL, die when the entry
  cannot be matched.
If description is not NULL, it is used in the warning message
instead of prefix (eg pass unit->name instead of prefix="units2.u27")
**************************************************************************/
static struct impr_type *lookup_building(struct section_file *file,
const char *prefix, const char *entry,
int loglevel, const char *filename,
const char *description)
{
const char *sval;
struct impr_type *pimprove;
sval = secfile_lookup_str_default(file, NULL, "%s.%s", prefix, entry);
if (!sval || (LOG_FATAL < loglevel && strcmp(sval, "None") == 0)) {
pimprove = B_NEVER;
} else {
pimprove = improvement_by_rule_name(sval);
if (B_NEVER == pimprove) {
ruleset_error(loglevel,
"\"%s\" %s %s: couldn't match \"%s\".",
filename, (description ? description : prefix), entry, sval);
/* ruleset_error() returned only if error was not fatal */
}
}
return pimprove;
}
/**************************************************************************
Lookup a prefix.entry string vector in the file and fill in the
  array, which should hold MAX_NUM_UNIT_LIST items. The output array is
  either NULL terminated or full (contains MAX_NUM_UNIT_LIST items). If
  the vector is not found and loglevel is LOG_FATAL, we report it as a
  fatal error; otherwise we just punt.
**************************************************************************/
static void lookup_unit_list(struct section_file *file, const char *prefix,
const char *entry, int loglevel,
struct unit_type **output,
const char *filename)
{
const char **slist;
size_t nval;
int i;
/* pre-fill with NULL: */
for(i = 0; i < MAX_NUM_UNIT_LIST; i++) {
output[i] = NULL;
}
slist = secfile_lookup_str_vec(file, &nval, "%s.%s", prefix, entry);
if (nval == 0) {
if (LOG_FATAL >= loglevel) {
ruleset_error(LOG_FATAL, "\"%s\": missing string vector %s.%s",
filename, prefix, entry);
}
return;
}
if (nval > MAX_NUM_UNIT_LIST) {
ruleset_error(LOG_FATAL,
"\"%s\": string vector %s.%s too long (%d, max %d)",
filename, prefix, entry, (int) nval, MAX_NUM_UNIT_LIST);
}
if (nval == 1 && strcmp(slist[0], "") == 0) {
free(slist);
return;
}
for (i = 0; i < nval; i++) {
const char *sval = slist[i];
struct unit_type *punittype = unit_type_by_rule_name(sval);
if (!punittype) {
ruleset_error(LOG_FATAL,
"\"%s\" %s.%s (%d): couldn't match \"%s\".",
filename, prefix, entry, i, sval);
}
output[i] = punittype;
log_debug("\"%s\" %s.%s (%d): %s (%d)", filename, prefix, entry, i, sval,
utype_number(punittype));
}
free(slist);
return;
}
/**************************************************************************
Lookup a prefix.entry string vector in the file and fill in the
array, which should hold MAX_NUM_TECH_LIST items. The output array is
either A_LAST terminated or full (contains MAX_NUM_TECH_LIST
items). All valid entries of the output array are guaranteed to
exist. There should be at least one value, but it may be "",
  meaning an empty list.
**************************************************************************/
static void lookup_tech_list(struct section_file *file, const char *prefix,
const char *entry, int *output,
const char *filename)
{
const char **slist;
size_t nval;
int i;
/* pre-fill with A_LAST: */
for(i=0; i<MAX_NUM_TECH_LIST; i++) {
output[i] = A_LAST;
}
slist = secfile_lookup_str_vec(file, &nval, "%s.%s", prefix, entry);
if (nval==0) {
ruleset_error(LOG_FATAL, "\"%s\": missing string vector %s.%s",
filename, prefix, entry);
}
if (nval>MAX_NUM_TECH_LIST) {
ruleset_error(LOG_FATAL,
"\"%s\": string vector %s.%s too long (%d, max %d)",
filename, prefix, entry, (int) nval, MAX_NUM_TECH_LIST);
}
if (nval==1 && strcmp(slist[0], "")==0) {
free(slist);
return;
}
for (i=0; i<nval; i++) {
const char *sval = slist[i];
struct advance *padvance = advance_by_rule_name(sval);
if (NULL == padvance) {
ruleset_error(LOG_FATAL,
"\"%s\" %s.%s (%d): couldn't match \"%s\".",
filename, prefix, entry, i, sval);
}
if (!valid_advance(padvance)) {
ruleset_error(LOG_FATAL, "\"%s\" %s.%s (%d): \"%s\" is removed.",
filename, prefix, entry, i, sval);
}
output[i] = advance_number(padvance);
log_debug("\"%s\" %s.%s (%d): %s (%d)", filename, prefix, entry, i, sval,
advance_number(padvance));
}
free(slist);
return;
}
/**************************************************************************
Lookup a prefix.entry string vector in the file and fill in the
array, which should hold MAX_NUM_BUILDING_LIST items. The output array is
either B_LAST terminated or full (contains MAX_NUM_BUILDING_LIST
items). [All valid entries of the output array are guaranteed to pass
improvement_exist()?] There should be at least one value, but it may be
"", meaning an empty list.
**************************************************************************/
static void lookup_building_list(struct section_file *file,
const char *prefix, const char *entry,
int *output, const char *filename)
{
const char **slist;
size_t nval;
int i;
/* pre-fill with B_LAST: */
for (i = 0; i < MAX_NUM_BUILDING_LIST; i++) {
output[i] = B_LAST;
}
slist = secfile_lookup_str_vec(file, &nval, "%s.%s", prefix, entry);
if (nval == 0) {
ruleset_error(LOG_FATAL, "\"%s\": missing string vector %s.%s",
filename, prefix, entry);
}
if (nval > MAX_NUM_BUILDING_LIST) {
ruleset_error(LOG_FATAL,
"\"%s\": string vector %s.%s too long (%d, max %d)",
filename, prefix, entry, (int) nval, MAX_NUM_BUILDING_LIST);
}
if (nval == 1 && strcmp(slist[0], "") == 0) {
free(slist);
return;
}
for (i = 0; i < nval; i++) {
const char *sval = slist[i];
struct impr_type *pimprove = improvement_by_rule_name(sval);
if (NULL == pimprove) {
ruleset_error(LOG_FATAL,
"\"%s\" %s.%s (%d): couldn't match \"%s\".",
filename, prefix, entry, i, sval);
}
output[i] = improvement_number(pimprove);
log_debug("%s.%s,%d %s %d", prefix, entry, i, sval, output[i]);
}
free(slist);
}
/**************************************************************************
Lookup a string prefix.entry in the file and return the corresponding
  unit_type pointer. If loglevel is not LOG_FATAL, return NULL for a "None"
  entry or a failed match; with LOG_FATAL, die if the entry cannot be matched.
If description is not NULL, it is used in the warning message
instead of prefix (eg pass unit->name instead of prefix="units2.u27")
**************************************************************************/
static struct unit_type *lookup_unit_type(struct section_file *file,
const char *prefix,
const char *entry,
int loglevel,
const char *filename,
const char *description)
{
const char *sval;
struct unit_type *punittype;
if (LOG_FATAL >= loglevel) {
sval = secfile_lookup_str(file, "%s.%s", prefix, entry);
} else {
sval = secfile_lookup_str_default(file, "None", "%s.%s", prefix, entry);
}
if (strcmp(sval, "None")==0) {
punittype = NULL;
} else {
punittype = unit_type_by_rule_name(sval);
if (!punittype) {
ruleset_error(loglevel,
"\"%s\" %s %s: couldn't match \"%s\".",
filename, (description ? description : prefix), entry, sval);
/* We continue if error was not fatal. */
punittype = NULL;
}
}
return punittype;
}
/**************************************************************************
  Lookup entry in the file and return the corresponding government pointer.
  If the entry is missing, the given fallback is returned (dying if that is
  NULL); dies if a given name cannot be matched. filename is for error message.
**************************************************************************/
static struct government *lookup_government(struct section_file *file,
const char *entry,
const char *filename,
struct government *fallback)
{
const char *sval;
struct government *gov;
sval = secfile_lookup_str_default(file, NULL, "%s", entry);
if (!sval) {
gov = fallback;
} else {
gov = government_by_rule_name(sval);
}
if (!gov) {
ruleset_error(LOG_FATAL,
"\"%s\" %s: couldn't match \"%s\".",
filename, entry, sval);
}
return gov;
}
/**************************************************************************
  Lookup entry in the file and return the corresponding move_type.
  Returns unit_move_type_invalid() when the entry is missing; dies if the
  value cannot be matched. filename is for error message.
**************************************************************************/
static enum unit_move_type lookup_move_type(struct section_file *file,
const char *entry,
const char *filename)
{
const char *sval;
enum unit_move_type mt;
sval = secfile_lookup_str_default(file, NULL, "%s", entry);
if (sval == NULL) {
return unit_move_type_invalid();
}
mt = unit_move_type_by_name(sval, fc_strcasecmp);
if (!unit_move_type_is_valid(mt)) {
ruleset_error(LOG_FATAL,
"\"%s\" %s: couldn't match \"%s\".",
filename, entry, sval);
}
return mt;
}
/****************************************************************************
Lookup optional string, returning allocated memory or NULL.
****************************************************************************/
static char *lookup_string(struct section_file *file, const char *prefix,
const char *suffix)
{
const char *sval = secfile_lookup_str(file, "%s.%s", prefix, suffix);
if (NULL != sval) {
char copy[strlen(sval) + 1];
strcpy(copy, sval);
remove_leading_trailing_spaces(copy);
if (strlen(copy) > 0) {
return fc_strdup(copy);
}
}
return NULL;
}
/****************************************************************************
Lookup optional string vector, returning allocated memory or NULL.
****************************************************************************/
static struct strvec *lookup_strvec(struct section_file *file,
const char *prefix, const char *suffix)
{
size_t dim;
const char **vec = secfile_lookup_str_vec(file, &dim,
"%s.%s", prefix, suffix);
if (NULL != vec) {
struct strvec *dest = strvec_new();
strvec_store(dest, vec, dim);
free(vec);
return dest;
}
return NULL;
}
/**************************************************************************
Look up the resource section name and return its pointer.
**************************************************************************/
static struct resource *lookup_resource(const char *filename,
const char *name,
const char *jsection)
{
resource_type_iterate(presource) {
const int i = resource_index(presource);
const char *isection = &resource_sections[i * MAX_SECTION_LABEL];
if (0 == fc_strcasecmp(isection, name)) {
return presource;
}
} resource_type_iterate_end;
ruleset_error(LOG_ERROR,
"\"%s\" [%s] has unknown \"%s\".",
filename,
jsection,
name);
return NULL;
}
/**************************************************************************
Look up the terrain section name and return its pointer.
**************************************************************************/
static struct terrain *lookup_terrain(struct section_file *file,
const char *item,
struct terrain *pthis)
{
const int j = terrain_index(pthis);
const char *jsection = &terrain_sections[j * MAX_SECTION_LABEL];
const char *name = secfile_lookup_str(file, "%s.%s", jsection, item);
if (NULL == name
|| *name == '\0'
|| (0 == strcmp(name, "none"))
|| (0 == strcmp(name, "no"))) {
return T_NONE;
}
if (0 == strcmp(name, "yes")) {
return pthis;
}
terrain_type_iterate(pterrain) {
const int i = terrain_index(pterrain);
const char *isection = &terrain_sections[i * MAX_SECTION_LABEL];
if (0 == fc_strcasecmp(isection, name)) {
return pterrain;
}
} terrain_type_iterate_end;
ruleset_error(LOG_ERROR, "\"%s\" [%s] has unknown \"%s\".",
secfile_name(file), jsection, name);
return T_NONE;
}
/**************************************************************************
Load "name" and (optionally) "rule_name" into a struct name_translation.
**************************************************************************/
static void ruleset_load_names(struct name_translation *pname,
struct section_file *file,
const char *sec_name)
{
const char *name = secfile_lookup_str(file, "%s.name", sec_name);
const char *rule_name = secfile_lookup_str(file, "%s.rule_name", sec_name);
if (!name) {
ruleset_error(LOG_FATAL,
"\"%s\" [%s]: no \"name\" specified.",
secfile_name(file), sec_name);
}
names_set(pname, name, rule_name);
}
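/* Typical name entries consumed by ruleset_load_names(); the section name
 * and values are illustrative only, and "rule_name" may be omitted:
 *
 *   [advance_example]
 *   name      = _("Example Advance")
 *   rule_name = "Example Advance"
 */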
/**************************************************************************
Load names of technologies so other rulesets can refer to techs with
their name.
**************************************************************************/
static void load_tech_names(struct section_file *file)
{
struct section_list *sec;
/* Number of techs in the ruleset (means without A_NONE). */
int num_techs = 0;
int i;
const char *filename = secfile_name(file);
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
/* The names: */
sec = secfile_sections_by_name_prefix(file, ADVANCE_SECTION_PREFIX);
if (NULL == sec || 0 == (num_techs = section_list_size(sec))) {
ruleset_error(LOG_FATAL, "\"%s\": No Advances?!?", filename);
}
log_verbose("%d advances (including possibly unused)", num_techs);
if(num_techs + A_FIRST > A_LAST_REAL) {
ruleset_error(LOG_FATAL, "\"%s\": Too many advances (%d, max %d)",
filename, num_techs, A_LAST_REAL-A_FIRST);
}
game.control.num_tech_types = num_techs + A_FIRST; /* includes A_NONE */
i = 0;
advance_iterate(A_FIRST, a) {
ruleset_load_names(&a->name, file, section_name(section_list_get(sec, i)));
i++;
} advance_iterate_end;
section_list_destroy(sec);
}
/**************************************************************************
  Load technology-related ruleset data
**************************************************************************/
static void load_ruleset_techs(struct section_file *file)
{
struct section_list *sec;
int i;
struct advance *a_none = advance_by_number(A_NONE);
const char *filename = secfile_name(file);
(void) check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
sec = secfile_sections_by_name_prefix(file, ADVANCE_SECTION_PREFIX);
/* Initialize dummy tech A_NONE */
a_none->require[AR_ONE] = a_none;
a_none->require[AR_TWO] = a_none;
a_none->require[AR_ROOT] = A_NEVER;
BV_CLR_ALL(a_none->flags);
i = 0;
advance_iterate(A_FIRST, a) {
const char *sec_name = section_name(section_list_get(sec, i));
const char *sval, **slist;
size_t nval;
int j, ival;
a->require[AR_ONE] = lookup_tech(file, sec_name, "req1", LOG_ERROR,
filename, rule_name(&a->name));
a->require[AR_TWO] = lookup_tech(file, sec_name, "req2", LOG_ERROR,
filename, rule_name(&a->name));
a->require[AR_ROOT] = lookup_tech(file, sec_name, "root_req", LOG_ERROR,
filename, rule_name(&a->name));
if ((A_NEVER == a->require[AR_ONE] && A_NEVER != a->require[AR_TWO])
|| (A_NEVER != a->require[AR_ONE] && A_NEVER == a->require[AR_TWO])) {
log_error("\"%s\" [%s] \"%s\": \"Never\" with non-\"Never\".",
filename, sec_name, rule_name(&a->name));
a->require[AR_ONE] = a->require[AR_TWO] = A_NEVER;
}
if (a_none == a->require[AR_ONE] && a_none != a->require[AR_TWO]) {
log_error("\"%s\" [%s] \"%s\": should have \"None\" second.",
filename, sec_name, rule_name(&a->name));
a->require[AR_ONE] = a->require[AR_TWO];
a->require[AR_TWO] = a_none;
}
BV_CLR_ALL(a->flags);
slist = secfile_lookup_str_vec(file, &nval, "%s.flags", sec_name);
for(j=0; j<nval; j++) {
sval = slist[j];
if(strcmp(sval,"")==0) {
continue;
}
ival = tech_flag_id_by_name(sval, fc_strcasecmp);
if (!tech_flag_id_is_valid(ival)) {
log_error("\"%s\" [%s] \"%s\": bad flag name \"%s\".",
filename, sec_name, rule_name(&a->name), sval);
} else {
BV_SET(a->flags, ival);
}
}
free(slist);
sz_strlcpy(a->graphic_str,
secfile_lookup_str_default(file, "-", "%s.graphic", sec_name));
sz_strlcpy(a->graphic_alt,
secfile_lookup_str_default(file, "-",
"%s.graphic_alt", sec_name));
a->helptext = lookup_strvec(file, sec_name, "helptext");
a->bonus_message = lookup_string(file, sec_name, "bonus_message");
a->preset_cost =
secfile_lookup_int_default(file, -1, "%s.%s", sec_name, "cost");
a->num_reqs = 0;
i++;
} advance_iterate_end;
/* Propagate a root tech up into the tech tree. Thus if a technology
   * X has Y as a root tech, then any technology requiring X also has
* Y as a root tech. */
restart:
advance_iterate(A_FIRST, a) {
if (valid_advance(a)
&& A_NEVER != a->require[AR_ROOT]) {
bool out_of_order = FALSE;
/* Now find any tech depending on this technology and update its
* root_req. */
advance_iterate(A_FIRST, b) {
if (valid_advance(b)
&& A_NEVER == b->require[AR_ROOT]
&& (a == b->require[AR_ONE] || a == b->require[AR_TWO])) {
b->require[AR_ROOT] = a->require[AR_ROOT];
if (b < a) {
out_of_order = TRUE;
}
}
} advance_iterate_end;
if (out_of_order) {
/* HACK: If we just changed the root_tech of a lower-numbered
* technology, we need to go back so that we can propagate the
* root_tech up to that technology's parents... */
goto restart;
}
}
} advance_iterate_end;
/* Now rename A_NEVER to A_NONE for consistency */
advance_iterate(A_NONE, a) {
if (A_NEVER == a->require[AR_ROOT]) {
a->require[AR_ROOT] = a_none;
}
} advance_iterate_end;
/* Some more consistency checking:
     Non-removed techs depending on removed techs are too
     broken to fix automatically, so die.
*/
advance_iterate(A_FIRST, a) {
if (valid_advance(a)) {
/* We check for recursive tech loops later,
* in build_required_techs_helper. */
if (!valid_advance(a->require[AR_ONE])) {
ruleset_error(LOG_FATAL,
"\"%s\" tech \"%s\": req1 leads to removed tech.",
filename,
advance_rule_name(a));
}
if (!valid_advance(a->require[AR_TWO])) {
ruleset_error(LOG_FATAL,
"\"%s\" tech \"%s\": req2 leads to removed tech.",
filename,
advance_rule_name(a));
}
}
} advance_iterate_end;
section_list_destroy(sec);
secfile_check_unused(file);
secfile_destroy(file);
}
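/* Sketch of an advance section as read by load_ruleset_techs() above. The
 * entry names follow the lookups in the code; the values are invented:
 *
 *   [advance_example]
 *   name        = _("Example Advance")
 *   req1        = "None"
 *   req2        = "None"
 *   root_req    = "None"
 *   flags       = ""
 *   graphic     = "a.example"
 *   graphic_alt = "-"
 *   helptext    = _("Some help text.")
 *   ; optional: bonus_message, cost
 */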
/**************************************************************************
Load names of units so other rulesets can refer to units with
their name.
**************************************************************************/
static void load_unit_names(struct section_file *file)
{
struct section_list *sec;
int nval = 0;
size_t user_flags;
const char **flaglist;
int i;
const char *filename = secfile_name(file);
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
/* User unit flag names */
flaglist = secfile_lookup_str_vec(file, &user_flags, "flags.names");
if (user_flags > MAX_NUM_USER_UNIT_FLAGS) {
ruleset_error(LOG_FATAL, "\"%s\": Too many user unit type flags!",
filename);
}
for (i = 0; i < user_flags; i++) {
set_user_unit_flag_name(F_USER_FLAG_1 + i, flaglist[i]);
}
for (; i < MAX_NUM_USER_UNIT_FLAGS; i++) {
set_user_unit_flag_name(F_USER_FLAG_1 + i, NULL);
}
if (flaglist) {
free(flaglist);
}
/* Unit classes */
sec = secfile_sections_by_name_prefix(file, UNIT_CLASS_SECTION_PREFIX);
if (NULL == sec || 0 == (nval = section_list_size(sec))) {
ruleset_error(LOG_FATAL, "\"%s\": No unit classes?!?", filename);
}
log_verbose("%d unit classes", nval);
if(nval > UCL_LAST) {
ruleset_error(LOG_FATAL, "\"%s\": Too many unit classes (%d, max %d)",
filename, nval, UCL_LAST);
}
game.control.num_unit_classes = nval;
unit_class_iterate(punitclass) {
const int i = uclass_index(punitclass);
ruleset_load_names(&punitclass->name, file,
section_name(section_list_get(sec, i)));
} unit_class_iterate_end;
section_list_destroy(sec);
/* The names: */
sec = secfile_sections_by_name_prefix(file, UNIT_SECTION_PREFIX);
if (NULL == sec || 0 == (nval = section_list_size(sec))) {
ruleset_error(LOG_FATAL, "\"%s\": No unit types?!?", filename);
}
log_verbose("%d unit types (including possibly unused)", nval);
if(nval > U_LAST) {
ruleset_error(LOG_FATAL, "\"%s\": Too many unit types (%d, max %d)",
filename, nval, U_LAST);
}
game.control.num_unit_types = nval;
unit_type_iterate(punittype) {
const int i = utype_index(punittype);
ruleset_load_names(&punittype->name, file,
section_name(section_list_get(sec, i)));
} unit_type_iterate_end;
section_list_destroy(sec);
}
/**************************************************************************
Load veteran levels.
**************************************************************************/
static bool load_ruleset_veteran(struct section_file *file,
const char *path,
struct veteran_system **vsystem, char *err,
size_t err_len)
{
const char **vlist_name;
int *vlist_power, *vlist_raise, *vlist_wraise, *vlist_move;
size_t count_name, count_power, count_raise, count_wraise, count_move;
int i;
bool ret = TRUE;
/* The pointer should be uninitialised. */
if (*vsystem != NULL) {
fc_snprintf(err, err_len, "Veteran system is defined?!");
return FALSE;
}
/* Load data. */
vlist_name = secfile_lookup_str_vec(file, &count_name,
"%s.veteran_names", path);
vlist_power = secfile_lookup_int_vec(file, &count_power,
"%s.veteran_power_fact", path);
vlist_raise = secfile_lookup_int_vec(file, &count_raise,
"%s.veteran_raise_chance", path);
vlist_wraise = secfile_lookup_int_vec(file, &count_wraise,
"%s.veteran_work_raise_chance",
path);
vlist_move = secfile_lookup_int_vec(file, &count_move,
"%s.veteran_move_bonus", path);
if (count_name > MAX_VET_LEVELS) {
ret = FALSE;
fc_snprintf(err, err_len, "\"%s\": Too many veteran levels (section "
"'%s': %lu, max %d)", secfile_name(file), path,
(long unsigned)count_name, MAX_VET_LEVELS);
} else if (count_name != count_power
|| count_name != count_raise
|| count_name != count_wraise
|| count_name != count_move) {
ret = FALSE;
fc_snprintf(err, err_len, "\"%s\": Different lengths for the veteran "
"settings in section '%s'", secfile_name(file),
path);
} else if (count_name == 0) {
/* Nothing defined. */
*vsystem = NULL;
} else {
/* Generate the veteran system. */
*vsystem = veteran_system_new((int)count_name);
#define rs_sanity_veteran(_path, _entry, _i, _condition, _action) \
if (_condition) { \
log_error("Invalid veteran definition '%s.%s[%d]'!", \
_path, _entry, _i); \
log_debug("Failed check: '%s'. Update value: '%s'.", \
#_condition, #_action); \
_action; \
}
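    /* The checks below enforce, in this order: no negative values anywhere,
     * a power factor of exactly 100 for the first level, non-decreasing
     * power factors, and no promotion out of the last level (raise and
     * work_raise chances must be 0 there and at most 100 in between).
     * Invalid values are clamped and logged rather than treated as fatal. */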
for (i = 0; i < count_name; i++) {
/* Some sanity checks. */
rs_sanity_veteran(path, "veteran_power_fact", i,
(vlist_power[i] < 0), vlist_power[i] = 0);
rs_sanity_veteran(path, "veteran_raise_chance", i,
(vlist_raise[i] < 0), vlist_raise[i] = 0);
rs_sanity_veteran(path, "veteran_work_raise_chance", i,
(vlist_wraise[i] < 0), vlist_wraise[i] = 0);
rs_sanity_veteran(path, "veteran_move_bonus", i,
(vlist_move[i] < 0), vlist_move[i] = 0);
if (i == 0) {
        /* First element. */
rs_sanity_veteran(path, "veteran_power_fact", i,
(vlist_power[i] != 100), vlist_power[i] = 100);
} else if (i == count_name - 1) {
/* Last element. */
rs_sanity_veteran(path, "veteran_power_fact", i,
(vlist_power[i] < vlist_power[i - 1]),
vlist_power[i] = vlist_power[i - 1]);
rs_sanity_veteran(path, "veteran_raise_chance", i,
(vlist_raise[i] != 0), vlist_raise[i] = 0);
rs_sanity_veteran(path, "veteran_work_raise_chance", i,
(vlist_wraise[i] != 0), vlist_wraise[i] = 0);
} else {
        /* All elements in between. */
rs_sanity_veteran(path, "veteran_power_fact", i,
(vlist_power[i] < vlist_power[i - 1]),
vlist_power[i] = vlist_power[i - 1]);
rs_sanity_veteran(path, "veteran_raise_chance", i,
(vlist_raise[i] > 100), vlist_raise[i] = 100);
rs_sanity_veteran(path, "veteran_work_raise_chance", i,
(vlist_wraise[i] > 100), vlist_wraise[i] = 100);
}
veteran_system_definition(*vsystem, i, vlist_name[i], vlist_power[i],
vlist_move[i], vlist_raise[i],
vlist_wraise[i]);
}
#undef rs_sanity_veteran
}
if (vlist_name) {
free(vlist_name);
}
if (vlist_power) {
free(vlist_power);
}
if (vlist_raise) {
free(vlist_raise);
}
if (vlist_wraise) {
free(vlist_wraise);
}
if (vlist_move) {
free(vlist_move);
}
return ret;
}
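/* Veteran system entries as read above, e.g. from a [veteran_system]
 * section; the level names and numbers are purely illustrative, and all
 * five vectors must have the same length:
 *
 *   veteran_names             = _("green"), _("veteran")
 *   veteran_power_fact        = 100, 150
 *   veteran_raise_chance      = 50, 0
 *   veteran_work_raise_chance = 5, 0
 *   veteran_move_bonus        = 0, 3
 */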
/**************************************************************************
  Load unit-related ruleset data.
**************************************************************************/
static void load_ruleset_units(struct section_file *file)
{
struct unit_type *u;
int j, ival;
size_t nval;
struct section_list *sec, *csec;
const char *sval, **slist;
const char *filename = secfile_name(file);
char msg[MAX_LEN_MSG];
(void) check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
if (!load_ruleset_veteran(file, "veteran_system", &game.veteran, msg,
sizeof(msg)) || game.veteran == NULL) {
ruleset_error(LOG_FATAL, "Error loading the default veteran system: %s",
msg);
}
sec = secfile_sections_by_name_prefix(file, UNIT_SECTION_PREFIX);
nval = (NULL != sec ? section_list_size(sec) : 0);
csec = secfile_sections_by_name_prefix(file, UNIT_CLASS_SECTION_PREFIX);
nval = (NULL != csec ? section_list_size(csec) : 0);
unit_class_iterate(uc) {
int i = uclass_index(uc);
char tmp[200] = "\0";
const char *hut_str;
const char *sec_name = section_name(section_list_get(csec, i));
if (secfile_lookup_int(file, &uc->min_speed, "%s.min_speed", sec_name)) {
uc->min_speed *= SINGLE_MOVE;
} else {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
if (!secfile_lookup_int(file, &uc->hp_loss_pct,
"%s.hp_loss_pct", sec_name)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
hut_str = secfile_lookup_str_default(file, "Normal", "%s.hut_behavior", sec_name);
if (fc_strcasecmp(hut_str, "Normal") == 0) {
uc->hut_behavior = HUT_NORMAL;
} else if (fc_strcasecmp(hut_str, "Nothing") == 0) {
uc->hut_behavior = HUT_NOTHING;
} else if (fc_strcasecmp(hut_str, "Frighten") == 0) {
uc->hut_behavior = HUT_FRIGHTEN;
} else {
ruleset_error(LOG_FATAL,
"\"%s\" unit_class \"%s\":"
" Illegal hut behavior \"%s\".",
filename,
uclass_rule_name(uc),
hut_str);
}
BV_CLR_ALL(uc->flags);
slist = secfile_lookup_str_vec(file, &nval, "%s.flags", sec_name);
for(j = 0; j < nval; j++) {
sval = slist[j];
if(strcmp(sval,"") == 0) {
continue;
}
ival = unit_class_flag_id_by_name(sval, fc_strcasecmp);
if (!unit_class_flag_id_is_valid(ival)) {
ival = unit_flag_by_rule_name(sval);
if (ival != F_LAST) {
ruleset_error(LOG_FATAL,
"\"%s\" unit_class \"%s\": unit_type flag!",
filename, uclass_rule_name(uc));
} else {
ruleset_error(LOG_FATAL,
"\"%s\" unit_class \"%s\": bad flag name \"%s\".",
filename, uclass_rule_name(uc), sval);
}
} else {
BV_SET(uc->flags, ival);
}
}
free(slist);
fc_strlcat(tmp, sec_name, 200);
fc_strlcat(tmp, ".move_type", 200);
uc->move_type = lookup_move_type(file, tmp, filename);
if (!unit_move_type_is_valid(uc->move_type)) {
/* Not explicitly given, determine automatically */
bool land_moving = FALSE;
bool sea_moving = FALSE;
if (uclass_has_flag(uc, UCF_RIVER_NATIVE)
|| uclass_has_flag(uc, UCF_ROAD_NATIVE)) {
land_moving = TRUE;
}
terrain_type_iterate(pterrain) {
bv_special spe;
bv_bases bases;
BV_CLR_ALL(spe);
BV_CLR_ALL(bases);
if (is_native_to_class(uc, pterrain, spe, bases)) {
if (is_ocean(pterrain)) {
sea_moving = TRUE;
} else {
land_moving = TRUE;
}
}
} terrain_type_iterate_end;
if (land_moving && sea_moving) {
uc->move_type = UMT_BOTH;
} else if (sea_moving) {
uc->move_type = UMT_SEA;
} else {
/* If unit has no native terrains, it is considered land moving */
uc->move_type = UMT_LAND;
}
} else if (uc->move_type == UMT_SEA) {
/* Explicitly given SEA_MOVING */
if (uclass_has_flag(uc, UCF_RIVER_NATIVE)) {
log_error("\"%s\" unit_class \"%s\": cannot give RiverNative "
"flag to sea moving unit",
filename, uclass_rule_name(uc));
BV_CLR(uc->flags, UCF_RIVER_NATIVE);
}
if (uclass_has_flag(uc, UCF_ROAD_NATIVE)) {
log_error("\"%s\" unit_class \"%s\": cannot give RoadNative "
"flag to sea moving unit",
filename, uclass_rule_name(uc));
BV_CLR(uc->flags, UCF_ROAD_NATIVE);
}
}
} unit_class_iterate_end;
/* Tech and Gov requirements; per unit veteran system */
unit_type_iterate(u) {
const int i = utype_index(u);
const struct section *psection = section_list_get(sec, i);
const char *sec_name = section_name(psection);
u->require_advance = lookup_tech(file, sec_name,
"tech_req", LOG_FATAL, filename,
rule_name(&u->name));
if (NULL != section_entry_by_name(psection, "gov_req")) {
char tmp[200] = "\0";
fc_strlcat(tmp, section_name(psection), sizeof(tmp));
fc_strlcat(tmp, ".gov_req", sizeof(tmp));
u->need_government = lookup_government(file, tmp, filename, NULL);
} else {
u->need_government = NULL; /* no requirement */
}
if (!load_ruleset_veteran(file, sec_name, &u->veteran,
msg, sizeof(msg))) {
ruleset_error(LOG_NORMAL, "Error loading the veteran system: %s",
msg);
}
u->obsoleted_by = lookup_unit_type(file, sec_name, "obsolete_by",
LOG_ERROR, filename,
rule_name(&u->name));
u->converted_to = lookup_unit_type(file, sec_name, "convert_to",
LOG_ERROR, filename,
rule_name(&u->name));
} unit_type_iterate_end;
/* main stats: */
unit_type_iterate(u) {
const int i = utype_index(u);
struct unit_class *pclass;
const char *sec_name = section_name(section_list_get(sec, i));
const char *string;
u->need_improvement = lookup_building(file, sec_name, "impr_req",
LOG_ERROR, filename,
rule_name(&u->name));
sval = secfile_lookup_str(file, "%s.class", sec_name);
pclass = unit_class_by_rule_name(sval);
if (!pclass) {
ruleset_error(LOG_FATAL,
"\"%s\" unit_type \"%s\":"
" bad class \"%s\".",
filename,
utype_rule_name(u),
sval);
}
u->uclass = pclass;
sz_strlcpy(u->sound_move,
secfile_lookup_str_default(file, "-", "%s.sound_move",
sec_name));
sz_strlcpy(u->sound_move_alt,
secfile_lookup_str_default(file, "-", "%s.sound_move_alt",
sec_name));
sz_strlcpy(u->sound_fight,
secfile_lookup_str_default(file, "-", "%s.sound_fight",
sec_name));
sz_strlcpy(u->sound_fight_alt,
secfile_lookup_str_default(file, "-", "%s.sound_fight_alt",
sec_name));
if ((string = secfile_lookup_str(file, "%s.graphic", sec_name))) {
sz_strlcpy(u->graphic_str, string);
} else {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
sz_strlcpy(u->graphic_alt,
secfile_lookup_str_default(file, "-", "%s.graphic_alt",
sec_name));
if (!secfile_lookup_int(file, &u->build_cost,
"%s.build_cost", sec_name)
|| !secfile_lookup_int(file, &u->pop_cost,
"%s.pop_cost", sec_name)
|| !secfile_lookup_int(file, &u->attack_strength,
"%s.attack", sec_name)
|| !secfile_lookup_int(file, &u->defense_strength,
"%s.defense", sec_name)
|| !secfile_lookup_int(file, &u->move_rate,
"%s.move_rate", sec_name)
|| !secfile_lookup_int(file, &u->vision_radius_sq,
"%s.vision_radius_sq", sec_name)
|| !secfile_lookup_int(file, &u->transport_capacity,
"%s.transport_cap", sec_name)
|| !secfile_lookup_int(file, &u->hp,
"%s.hitpoints", sec_name)
|| !secfile_lookup_int(file, &u->firepower,
"%s.firepower", sec_name)
|| !secfile_lookup_int(file, &u->fuel,
"%s.fuel", sec_name)
|| !secfile_lookup_int(file, &u->happy_cost,
"%s.uk_happy", sec_name)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
u->move_rate *= SINGLE_MOVE;
if (u->firepower <= 0) {
ruleset_error(LOG_FATAL,
"\"%s\" unit_type \"%s\":"
" firepower is %d,"
" but must be at least 1. "
" If you want no attack ability,"
" set the unit's attack strength to 0.",
filename,
utype_rule_name(u),
u->firepower);
}
output_type_iterate(o) {
u->upkeep[o] = secfile_lookup_int_default(file, 0, "%s.uk_%s",
sec_name,
get_output_identifier(o));
} output_type_iterate_end;
slist = secfile_lookup_str_vec(file, &nval, "%s.cargo", sec_name);
BV_CLR_ALL(u->cargo);
for (j = 0; j < nval; j++) {
struct unit_class *uclass = unit_class_by_rule_name(slist[j]);
if (!uclass) {
ruleset_error(LOG_FATAL,
"\"%s\" unit_type \"%s\":"
"has unknown unit class %s as cargo.",
filename,
utype_rule_name(u),
slist[j]);
}
BV_SET(u->cargo, uclass_index(uclass));
}
free(slist);
slist = secfile_lookup_str_vec(file, &nval, "%s.targets", sec_name);
BV_CLR_ALL(u->targets);
for (j = 0; j < nval; j++) {
struct unit_class *uclass = unit_class_by_rule_name(slist[j]);
if (!uclass) {
ruleset_error(LOG_FATAL,
"\"%s\" unit_type \"%s\":"
"has unknown unit class %s as target.",
filename,
utype_rule_name(u),
slist[j]);
}
BV_SET(u->targets, uclass_index(uclass));
}
free(slist);
/* Set also all classes that are never unreachable as targets. */
unit_class_iterate(pclass) {
if (!uclass_has_flag(pclass, UCF_UNREACHABLE)) {
BV_SET(u->targets, uclass_index(pclass));
}
} unit_class_iterate_end;
u->helptext = lookup_strvec(file, sec_name, "helptext");
u->paratroopers_range = secfile_lookup_int_default(file,
0, "%s.paratroopers_range", sec_name);
u->paratroopers_mr_req = SINGLE_MOVE * secfile_lookup_int_default(file,
0, "%s.paratroopers_mr_req", sec_name);
u->paratroopers_mr_sub = SINGLE_MOVE * secfile_lookup_int_default(file,
0, "%s.paratroopers_mr_sub", sec_name);
u->bombard_rate = secfile_lookup_int_default(file,
0, "%s.bombard_rate", sec_name);
u->city_size = secfile_lookup_int_default(file,
1, "%s.city_size", sec_name);
} unit_type_iterate_end;
/* flags */
unit_type_iterate(u) {
const int i = utype_index(u);
BV_CLR_ALL(u->flags);
fc_assert(!utype_has_flag(u, F_LAST - 1));
slist = secfile_lookup_str_vec(file, &nval, "%s.flags",
section_name(section_list_get(sec, i)));
for(j=0; j<nval; j++) {
sval = slist[j];
if (0 == strcmp(sval, "")) {
continue;
}
ival = unit_flag_by_rule_name(sval);
if (F_LAST == ival) {
log_error("\"%s\" unit_type \"%s\": bad flag name \"%s\".",
filename, utype_rule_name(u), sval);
ival = unit_class_flag_id_by_name(sval, fc_strcasecmp);
if (unit_class_flag_id_is_valid(ival)) {
ruleset_error(LOG_FATAL,
"\"%s\" unit_type \"%s\": unit_class flag!",
filename, utype_rule_name(u));
} else {
ruleset_error(LOG_FATAL,
"\"%s\" unit_type \"%s\": bad flag name \"%s\".",
filename, utype_rule_name(u), sval);
}
} else {
BV_SET(u->flags, ival);
}
fc_assert(utype_has_flag(u, ival));
}
free(slist);
} unit_type_iterate_end;
/* roles */
unit_type_iterate(u) {
const int i = utype_index(u);
BV_CLR_ALL(u->roles);
slist = secfile_lookup_str_vec(file, &nval, "%s.roles",
section_name(section_list_get(sec, i)));
for(j=0; j<nval; j++) {
sval = slist[j];
if(strcmp(sval,"")==0) {
continue;
}
ival = unit_role_by_rule_name(sval);
if (ival==L_LAST) {
log_error("\"%s\" unit_type \"%s\": bad role name \"%s\".",
filename, utype_rule_name(u), sval);
} else if ((ival == L_FERRYBOAT || ival == L_BARBARIAN_BOAT)
&& u->uclass->move_type == UMT_LAND) {
log_error( "\"%s\" unit_type \"%s\": role \"%s\" "
"for land moving unit.",
filename, utype_rule_name(u), sval);
} else {
BV_SET(u->roles, ival - L_FIRST);
}
fc_assert(utype_has_role(u, ival));
}
free(slist);
} unit_type_iterate_end;
/* Some more consistency checking: */
unit_type_iterate(u) {
if (!valid_advance(u->require_advance)) {
log_error("\"%s\" unit_type \"%s\": depends on removed tech \"%s\".",
filename, utype_rule_name(u),
advance_rule_name(u->require_advance));
u->require_advance = A_NEVER;
}
if (utype_has_flag(u, F_SETTLERS)
&& u->city_size <= 0) {
ruleset_error(LOG_ERROR, "\"%s\": Unit %s would build size %d cities",
filename, utype_rule_name(u), u->city_size);
u->city_size = 1;
}
} unit_type_iterate_end;
/* Setup roles and flags pre-calcs: */
role_unit_precalcs();
/* Check some required flags and roles etc: */
if(num_role_units(F_CITIES)==0) {
ruleset_error(LOG_FATAL, "\"%s\": No flag=cities units?", filename);
}
if(num_role_units(F_SETTLERS)==0) {
ruleset_error(LOG_FATAL, "\"%s\": No flag=settler units?", filename);
}
if(num_role_units(L_EXPLORER)==0) {
ruleset_error(LOG_FATAL, "\"%s\": No role=explorer units?", filename);
}
if(num_role_units(L_FERRYBOAT)==0) {
ruleset_error(LOG_FATAL, "\"%s\": No role=ferryboat units?", filename);
}
if(num_role_units(L_FIRSTBUILD)==0) {
ruleset_error(LOG_FATAL, "\"%s\": No role=firstbuild units?", filename);
}
if (0 == num_role_units(L_BARBARIAN)
&& BARBS_DISABLED != game.server.barbarianrate) {
ruleset_error(LOG_FATAL, "\"%s\": No role=barbarian units?", filename);
}
if (0 == num_role_units(L_BARBARIAN_LEADER)
&& BARBS_DISABLED != game.server.barbarianrate) {
ruleset_error(LOG_FATAL, "\"%s\": No role=barbarian leader units?", filename);
}
if (0 == num_role_units(L_BARBARIAN_BUILD)
&& BARBS_DISABLED != game.server.barbarianrate) {
ruleset_error(LOG_FATAL, "\"%s\": No role=barbarian build units?", filename);
}
if (0 == num_role_units(L_BARBARIAN_BOAT)
&& BARBS_DISABLED != game.server.barbarianrate) {
ruleset_error(LOG_FATAL, "\"%s\": No role=barbarian ship units?", filename);
} else if (num_role_units(L_BARBARIAN_BOAT) > 0) {
u = get_role_unit(L_BARBARIAN_BOAT,0);
if(utype_move_type(u) != UMT_SEA) {
ruleset_error(LOG_FATAL,
"\"%s\": Barbarian boat (%s) needs to be a sea unit.",
filename,
utype_rule_name(u));
}
}
if (0 == num_role_units(L_BARBARIAN_SEA)
&& BARBS_DISABLED != game.server.barbarianrate) {
ruleset_error(LOG_FATAL, "\"%s\": No role=sea raider barbarian units?",
filename);
}
section_list_destroy(csec);
section_list_destroy(sec);
secfile_check_unused(file);
secfile_destroy(file);
}
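/* Rough shape of the unit class and unit type sections consumed above. The
 * entry names follow the lookups in load_ruleset_units(); every concrete
 * value is invented for illustration:
 *
 *   [unitclass_example]
 *   name         = _("Example Class")
 *   min_speed    = 1
 *   hp_loss_pct  = 0
 *   hut_behavior = "Normal"          ; or "Nothing" / "Frighten"
 *   flags        = ""
 *
 *   [unit_example]
 *   name             = _("Example Unit")
 *   class            = "Example Class"
 *   tech_req         = "None"
 *   graphic          = "u.example"
 *   build_cost       = 10
 *   pop_cost         = 0
 *   attack           = 1
 *   defense          = 1
 *   hitpoints        = 10
 *   firepower        = 1
 *   move_rate        = 1
 *   vision_radius_sq = 2
 *   transport_cap    = 0
 *   fuel             = 0
 *   uk_happy         = 0
 *   flags            = ""
 *   roles            = ""
 */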
/**************************************************************************
Load names of buildings so other rulesets can refer to buildings with
their name.
**************************************************************************/
static void load_building_names(struct section_file *file)
{
struct section_list *sec;
int i, nval = 0;
const char *filename = secfile_name(file);
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
/* The names: */
sec = secfile_sections_by_name_prefix(file, BUILDING_SECTION_PREFIX);
if (NULL == sec || 0 == (nval = section_list_size(sec))) {
ruleset_error(LOG_FATAL, "\"%s\": No improvements?!?", filename);
}
log_verbose("%d improvement types (including possibly unused)", nval);
if (nval > B_LAST) {
ruleset_error(LOG_FATAL, "\"%s\": Too many improvements (%d, max %d)",
filename, nval, B_LAST);
}
game.control.num_impr_types = nval;
for (i = 0; i < nval; i++) {
struct impr_type *b = improvement_by_number(i);
ruleset_load_names(&b->name, file, section_name(section_list_get(sec, i)));
}
section_list_destroy(sec);
}
/**************************************************************************
  Load building-related ruleset data
**************************************************************************/
static void load_ruleset_buildings(struct section_file *file)
{
struct section_list *sec;
const char *item;
int i, nval;
const char *filename = secfile_name(file);
(void) check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
sec = secfile_sections_by_name_prefix(file, BUILDING_SECTION_PREFIX);
nval = (NULL != sec ? section_list_size(sec) : 0);
for (i = 0; i < nval; i++) {
struct impr_type *b = improvement_by_number(i);
const char *sec_name = section_name(section_list_get(sec, i));
struct requirement_vector *reqs =
lookup_req_list(file, sec_name, "reqs",
improvement_rule_name(b));
const char *sval, **slist;
int j, ival;
size_t nflags;
item = secfile_lookup_str(file, "%s.genus", sec_name);
b->genus = impr_genus_id_by_name(item, fc_strcasecmp);
if (!impr_genus_id_is_valid(b->genus)) {
ruleset_error(LOG_FATAL, "\"%s\" improvement \"%s\": couldn't match "
"genus \"%s\".", filename,
improvement_rule_name(b), item);
}
slist = secfile_lookup_str_vec(file, &nflags, "%s.flags", sec_name);
BV_CLR_ALL(b->flags);
for(j=0; j<nflags; j++) {
sval = slist[j];
if(strcmp(sval,"")==0) {
continue;
}
ival = impr_flag_id_by_name(sval, fc_strcasecmp);
if (!impr_flag_id_is_valid(ival)) {
ruleset_error(LOG_FATAL,
"\"%s\" improvement \"%s\": bad flag name \"%s\".",
filename, improvement_rule_name(b), sval);
} else {
BV_SET(b->flags, ival);
}
}
free(slist);
requirement_vector_copy(&b->reqs, reqs);
b->obsolete_by = lookup_tech(file, sec_name, "obsolete_by", LOG_ERROR,
filename, rule_name(&b->name));
if (advance_by_number(A_NONE) == b->obsolete_by) {
/*
* The ruleset can specify "None" for a never-obsoleted
* improvement. Currently this means A_NONE, which is an
* unnecessary special-case. We use A_NEVER to flag a
* never-obsoleted improvement in the code instead.
* (Test for valid_advance() later.)
*/
b->obsolete_by = A_NEVER;
}
b->replaced_by = lookup_building(file, sec_name, "replaced_by",
LOG_ERROR, filename,
rule_name(&b->name));
if (!secfile_lookup_int(file, &b->build_cost,
"%s.build_cost", sec_name)
|| !secfile_lookup_int(file, &b->upkeep,
"%s.upkeep", sec_name)
|| !secfile_lookup_int(file, &b->sabotage,
"%s.sabotage", sec_name)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
sz_strlcpy(b->graphic_str,
secfile_lookup_str_default(file, "-",
"%s.graphic", sec_name));
sz_strlcpy(b->graphic_alt,
secfile_lookup_str_default(file, "-",
"%s.graphic_alt", sec_name));
sz_strlcpy(b->soundtag,
secfile_lookup_str_default(file, "-",
"%s.sound", sec_name));
sz_strlcpy(b->soundtag_alt,
secfile_lookup_str_default(file, "-",
"%s.sound_alt", sec_name));
b->helptext = lookup_strvec(file, sec_name, "helptext");
b->allows_units = FALSE;
unit_type_iterate(ut) {
if (ut->need_improvement == b) {
b->allows_units = TRUE;
break;
}
} unit_type_iterate_end;
}
/* Some more consistency checking: */
improvement_iterate(b) {
if (valid_improvement(b)) {
if (A_NEVER != b->obsolete_by
&& !valid_advance(b->obsolete_by)) {
log_error("\"%s\" improvement \"%s\": obsoleted by "
"removed tech \"%s\".",
filename, improvement_rule_name(b),
advance_rule_name(b->obsolete_by));
b->obsolete_by = A_NEVER;
}
}
} improvement_iterate_end;
section_list_destroy(sec);
secfile_check_unused(file);
secfile_destroy(file);
}
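/* Sketch of a building section as read by load_ruleset_buildings(); entry
 * names match the lookups above, the values are invented:
 *
 *   [building_example]
 *   name        = _("Example Building")
 *   genus       = "Improvement"
 *   reqs        =
 *     { "type", "name", "range"
 *       "Tech", "Example Advance", "Player"
 *     }
 *   flags       = ""
 *   obsolete_by = "None"
 *   replaced_by = "None"
 *   build_cost  = 30
 *   upkeep      = 1
 *   sabotage    = 100
 *   graphic     = "b.example"
 *   sound       = "b_generic"
 *   helptext    = _("Some help text.")
 */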
/**************************************************************************
Load names of terrain types so other rulesets can refer to terrains with
their name.
**************************************************************************/
static void load_terrain_names(struct section_file *file)
{
int nval = 0;
struct section_list *sec;
const char *filename = secfile_name(file);
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
/* terrain names */
sec = secfile_sections_by_name_prefix(file, TERRAIN_SECTION_PREFIX);
if (NULL == sec || 0 == (nval = section_list_size(sec))) {
ruleset_error(LOG_FATAL, "\"%s\": ruleset doesn't have any terrains.",
filename);
}
if (nval > MAX_NUM_TERRAINS) {
ruleset_error(LOG_FATAL, "\"%s\": Too many terrains (%d, max %d)",
filename, nval, MAX_NUM_TERRAINS);
}
game.control.terrain_count = nval;
/* avoid re-reading files */
if (terrain_sections) {
free(terrain_sections);
}
terrain_sections = fc_calloc(nval, MAX_SECTION_LABEL);
terrain_type_iterate(pterrain) {
const int i = terrain_index(pterrain);
const char *sec_name = section_name(section_list_get(sec, i));
ruleset_load_names(&pterrain->name, file, sec_name);
if (0 == strcmp(rule_name(&pterrain->name), "unused")) {
name_set(&pterrain->name, "");
}
section_strlcpy(&terrain_sections[i * MAX_SECTION_LABEL], sec_name);
} terrain_type_iterate_end;
section_list_destroy(sec);
/* resource names */
sec = secfile_sections_by_name_prefix(file, RESOURCE_SECTION_PREFIX);
nval = (NULL != sec ? section_list_size(sec) : 0);
if (nval > MAX_NUM_RESOURCES) {
ruleset_error(LOG_FATAL, "\"%s\": Too many resources (%d, max %d)",
filename, nval, MAX_NUM_RESOURCES);
}
game.control.resource_count = nval;
/* avoid re-reading files */
if (resource_sections) {
free(resource_sections);
}
resource_sections = fc_calloc(nval, MAX_SECTION_LABEL);
resource_type_iterate(presource) {
const int i = resource_index(presource);
const char *sec_name = section_name(section_list_get(sec, i));
ruleset_load_names(&presource->name, file, sec_name);
if (0 == strcmp(rule_name(&presource->name), "unused")) {
name_set(&presource->name, "");
}
section_strlcpy(&resource_sections[i * MAX_SECTION_LABEL], sec_name);
} resource_type_iterate_end;
if (NULL != sec) {
section_list_destroy(sec);
}
/* base names */
sec = secfile_sections_by_name_prefix(file, BASE_SECTION_PREFIX);
nval = (NULL != sec ? section_list_size(sec) : 0);
if (nval > MAX_BASE_TYPES) {
ruleset_error(LOG_FATAL, "\"%s\": Too many base types (%d, max %d)",
filename, nval, MAX_BASE_TYPES);
}
game.control.num_base_types = nval;
if (base_sections) {
free(base_sections);
}
base_sections = fc_calloc(nval, MAX_SECTION_LABEL);
base_type_iterate(pbase) {
const int i = base_index(pbase);
const char *sec_name = section_name(section_list_get(sec, i));
ruleset_load_names(&pbase->name, file, sec_name);
section_strlcpy(&base_sections[i * MAX_SECTION_LABEL], sec_name);
} base_type_iterate_end;
if (NULL != sec) {
section_list_destroy(sec);
}
}
/**************************************************************************
  Load terrain, resource and base related ruleset data
**************************************************************************/
static void load_ruleset_terrain(struct section_file *file)
{
struct strvec *psv;
size_t nval;
int j;
const char **res;
const char *filename = secfile_name(file);
/* char *datafile_options = */ (void)
check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
/* options */
terrain_control.may_road =
secfile_lookup_bool_default(file, TRUE, "options.may_road");
terrain_control.may_irrigate =
secfile_lookup_bool_default(file, TRUE, "options.may_irrigate");
terrain_control.may_mine =
secfile_lookup_bool_default(file, TRUE, "options.may_mine");
terrain_control.may_transform =
secfile_lookup_bool_default(file, TRUE, "options.may_transform");
/* parameters */
terrain_control.ocean_reclaim_requirement_pct
= secfile_lookup_int_default(file, 101,
"parameters.ocean_reclaim_requirement");
terrain_control.land_channel_requirement_pct
= secfile_lookup_int_default(file, 101,
"parameters.land_channel_requirement");
terrain_control.lake_max_size
= secfile_lookup_int_default(file, 0,
"parameters.lake_max_size");
map.server.ocean_resources
= secfile_lookup_bool_default(file, FALSE,
"parameters.ocean_resources");
terrain_control.river_move_mode =
secfile_lookup_int_default(file, RMV_FAST_STRICT, "parameters.river_move_mode");
terrain_control.river_defense_bonus =
secfile_lookup_int_default(file, 50, "parameters.river_defense_bonus");
terrain_control.river_trade_incr =
secfile_lookup_int_default(file, 1, "parameters.river_trade_incr");
psv = lookup_strvec(file, "parameters", "river_help_text");
PACKET_STRVEC_COMPUTE(terrain_control.river_help_text, psv);
if (NULL != psv) {
strvec_destroy(psv);
}
terrain_control.road_superhighway_trade_bonus =
secfile_lookup_int_default(file, 50, "parameters.road_superhighway_trade_bonus");
output_type_iterate(o) {
terrain_control.rail_tile_bonus[o] =
secfile_lookup_int_default(file, 0, "parameters.rail_%s_bonus",
get_output_identifier(o));
terrain_control.pollution_tile_penalty[o]
= secfile_lookup_int_default(file, 50,
"parameters.pollution_%s_penalty",
get_output_identifier(o));
terrain_control.fallout_tile_penalty[o]
= secfile_lookup_int_default(file, 50,
"parameters.fallout_%s_penalty",
get_output_identifier(o));
} output_type_iterate_end;
/* terrain details */
terrain_type_iterate(pterrain) {
const char **slist;
const int i = terrain_index(pterrain);
const char *tsection = &terrain_sections[i * MAX_SECTION_LABEL];
sz_strlcpy(pterrain->graphic_str,
secfile_lookup_str(file,"%s.graphic", tsection));
sz_strlcpy(pterrain->graphic_alt,
secfile_lookup_str(file,"%s.graphic_alt", tsection));
pterrain->identifier
= secfile_lookup_str(file, "%s.identifier", tsection)[0];
if ('\0' == pterrain->identifier) {
ruleset_error(LOG_FATAL, "\"%s\" [%s] identifier missing value.",
filename, tsection);
}
if (TERRAIN_UNKNOWN_IDENTIFIER == pterrain->identifier) {
ruleset_error(LOG_FATAL,
"\"%s\" [%s] cannot use '%c' as an identifier;"
" it is reserved for unknown terrain.",
filename, tsection, pterrain->identifier);
}
for (j = T_FIRST; j < i; j++) {
if (pterrain->identifier == terrain_by_number(j)->identifier) {
ruleset_error(LOG_FATAL,
"\"%s\" [%s] has the same identifier as [%s].",
filename,
tsection,
&terrain_sections[j * MAX_SECTION_LABEL]);
}
}
if (!secfile_lookup_int(file, &pterrain->movement_cost,
"%s.movement_cost", tsection)
|| !secfile_lookup_int(file, &pterrain->defense_bonus,
"%s.defense_bonus", tsection)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
output_type_iterate(o) {
pterrain->output[o]
= secfile_lookup_int_default(file, 0, "%s.%s", tsection,
get_output_identifier(o));
} output_type_iterate_end;
res = secfile_lookup_str_vec(file, &nval, "%s.resources", tsection);
pterrain->resources = fc_calloc(nval + 1, sizeof(*pterrain->resources));
for (j = 0; j < nval; j++) {
pterrain->resources[j] = lookup_resource(filename, res[j], tsection);
}
pterrain->resources[nval] = NULL;
free(res);
res = NULL;
if (!secfile_lookup_int(file, &pterrain->road_trade_incr,
"%s.road_trade_incr", tsection)
|| !secfile_lookup_int(file, &pterrain->road_time,
"%s.road_time", tsection)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
pterrain->irrigation_result
= lookup_terrain(file, "irrigation_result", pterrain);
if (!secfile_lookup_int(file, &pterrain->irrigation_food_incr,
"%s.irrigation_food_incr", tsection)
|| !secfile_lookup_int(file, &pterrain->irrigation_time,
"%s.irrigation_time", tsection)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
pterrain->mining_result
= lookup_terrain(file, "mining_result", pterrain);
if (!secfile_lookup_int(file, &pterrain->mining_shield_incr,
"%s.mining_shield_incr", tsection)
|| !secfile_lookup_int(file, &pterrain->mining_time,
"%s.mining_time", tsection)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
pterrain->transform_result
= lookup_terrain(file, "transform_result", pterrain);
if (!secfile_lookup_int(file, &pterrain->transform_time,
"%s.transform_time", tsection)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
pterrain->rail_time
= secfile_lookup_int_default(file, 3, "%s.rail_time", tsection);
pterrain->clean_pollution_time
= secfile_lookup_int_default(file, 3, "%s.clean_pollution_time", tsection);
pterrain->clean_fallout_time
= secfile_lookup_int_default(file, 3, "%s.clean_fallout_time", tsection);
pterrain->warmer_wetter_result
= lookup_terrain(file, "warmer_wetter_result", pterrain);
pterrain->warmer_drier_result
= lookup_terrain(file, "warmer_drier_result", pterrain);
pterrain->cooler_wetter_result
= lookup_terrain(file, "cooler_wetter_result", pterrain);
pterrain->cooler_drier_result
= lookup_terrain(file, "cooler_drier_result", pterrain);
slist = secfile_lookup_str_vec(file, &nval, "%s.flags", tsection);
BV_CLR_ALL(pterrain->flags);
for (j = 0; j < nval; j++) {
const char *sval = slist[j];
enum terrain_flag_id flag
= terrain_flag_id_by_name(sval, fc_strcasecmp);
if (!terrain_flag_id_is_valid(flag)) {
ruleset_error(LOG_FATAL, "\"%s\" [%s] has unknown flag \"%s\".",
filename, tsection, sval);
} else {
BV_SET(pterrain->flags, flag);
}
}
free(slist);
{
enum mapgen_terrain_property mtp;
for (mtp = mapgen_terrain_property_begin();
mtp != mapgen_terrain_property_end();
mtp = mapgen_terrain_property_next(mtp)) {
pterrain->property[mtp]
= secfile_lookup_int_default(file, 0, "%s.property_%s", tsection,
mapgen_terrain_property_name(mtp));
}
}
slist = secfile_lookup_str_vec(file, &nval, "%s.native_to", tsection);
BV_CLR_ALL(pterrain->native_to);
for (j = 0; j < nval; j++) {
struct unit_class *class = unit_class_by_rule_name(slist[j]);
if (!class) {
ruleset_error(LOG_FATAL,
"\"%s\" [%s] is native to unknown unit class \"%s\".",
filename, tsection, slist[j]);
} else {
BV_SET(pterrain->native_to, uclass_index(class));
}
}
free(slist);
/* get terrain color */
{
fc_assert_ret(pterrain->rgb == NULL);
if (!rgbcolor_load(file, &pterrain->rgb, "%s.color", tsection)) {
ruleset_error(LOG_FATAL, "Missing terrain color definition: %s",
secfile_error());
}
}
pterrain->helptext = lookup_strvec(file, tsection, "helptext");
} terrain_type_iterate_end;
/* resource details */
resource_type_iterate(presource) {
char identifier[MAX_LEN_NAME];
const int i = resource_index(presource);
const char *rsection = &resource_sections[i * MAX_SECTION_LABEL];
output_type_iterate (o) {
presource->output[o] =
secfile_lookup_int_default(file, 0, "%s.%s", rsection,
get_output_identifier(o));
} output_type_iterate_end;
sz_strlcpy(presource->graphic_str,
secfile_lookup_str(file,"%s.graphic", rsection));
sz_strlcpy(presource->graphic_alt,
secfile_lookup_str(file,"%s.graphic_alt", rsection));
sz_strlcpy(identifier,
secfile_lookup_str(file,"%s.identifier", rsection));
presource->identifier = identifier[0];
if (RESOURCE_NULL_IDENTIFIER == presource->identifier) {
ruleset_error(LOG_FATAL, "\"%s\" [%s] identifier missing value.",
filename, rsection);
}
if (RESOURCE_NONE_IDENTIFIER == presource->identifier) {
ruleset_error(LOG_FATAL,
"\"%s\" [%s] cannot use '%c' as an identifier;"
" it is reserved.",
filename, rsection, presource->identifier);
}
for (j = 0; j < i; j++) {
if (presource->identifier == resource_by_number(j)->identifier) {
ruleset_error(LOG_FATAL,
"\"%s\" [%s] has the same identifier as [%s].",
filename,
rsection,
&resource_sections[j * MAX_SECTION_LABEL]);
}
}
} resource_type_iterate_end;
/* base details */
base_type_iterate(pbase) {
BV_CLR_ALL(pbase->conflicts);
} base_type_iterate_end;
base_type_iterate(pbase) {
const char *section = &base_sections[base_index(pbase) * MAX_SECTION_LABEL];
int j;
const char **slist;
struct requirement_vector *reqs;
const char *gui_str;
pbase->buildable = secfile_lookup_bool_default(file, TRUE,
"%s.buildable", section);
pbase->pillageable = secfile_lookup_bool_default(file, TRUE,
"%s.pillageable", section);
sz_strlcpy(pbase->graphic_str,
secfile_lookup_str_default(file, "-", "%s.graphic", section));
sz_strlcpy(pbase->graphic_alt,
secfile_lookup_str_default(file, "-",
"%s.graphic_alt", section));
sz_strlcpy(pbase->activity_gfx,
secfile_lookup_str_default(file, "-",
"%s.activity_gfx", section));
reqs = lookup_req_list(file, section, "reqs", base_rule_name(pbase));
requirement_vector_copy(&pbase->reqs, reqs);
slist = secfile_lookup_str_vec(file, &nval, "%s.native_to", section);
BV_CLR_ALL(pbase->native_to);
for (j = 0; j < nval; j++) {
struct unit_class *class = unit_class_by_rule_name(slist[j]);
if (!class) {
ruleset_error(LOG_FATAL,
"\"%s\" base \"%s\" is native to unknown unit class \"%s\".",
filename,
base_rule_name(pbase),
slist[j]);
} else {
BV_SET(pbase->native_to, uclass_index(class));
}
}
free(slist);
gui_str = secfile_lookup_str(file,"%s.gui_type", section);
pbase->gui_type = base_gui_type_by_name(gui_str, fc_strcasecmp);
if (!base_gui_type_is_valid(pbase->gui_type)) {
ruleset_error(LOG_FATAL, "\"%s\" base \"%s\": unknown gui_type \"%s\".",
filename,
base_rule_name(pbase),
gui_str);
}
if (!secfile_lookup_int(file, &pbase->build_time,
"%s.build_time", section)) {
ruleset_error(LOG_FATAL, "%s", secfile_error());
}
pbase->border_sq = secfile_lookup_int_default(file, -1, "%s.border_sq",
section);
pbase->vision_main_sq = secfile_lookup_int_default(file, -1,
"%s.vision_main_sq",
section);
pbase->vision_invis_sq = secfile_lookup_int_default(file, -1,
"%s.vision_invis_sq",
section);
pbase->defense_bonus = secfile_lookup_int_default(file, 0,
"%s.defense_bonus",
section);
slist = secfile_lookup_str_vec(file, &nval, "%s.flags", section);
BV_CLR_ALL(pbase->flags);
for (j = 0; j < nval; j++) {
const char *sval = slist[j];
enum base_flag_id flag = base_flag_id_by_name(sval, fc_strcasecmp);
if (!base_flag_id_is_valid(flag)) {
ruleset_error(LOG_FATAL, "\"%s\" base \"%s\": unknown flag \"%s\".",
filename,
base_rule_name(pbase),
sval);
} else {
BV_SET(pbase->flags, flag);
}
}
free(slist);
slist = secfile_lookup_str_vec(file, &nval, "%s.conflicts", section);
for (j = 0; j < nval; j++) {
const char *sval = slist[j];
struct base_type *pbase2 = base_type_by_rule_name(sval);
if (pbase2 == NULL) {
ruleset_error(LOG_FATAL, "\"%s\" base \"%s\": unknown conflict base \"%s\".",
filename,
base_rule_name(pbase),
sval);
} else {
BV_SET(pbase->conflicts, base_index(pbase2));
BV_SET(pbase2->conflicts, base_index(pbase));
}
}
free(slist);
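    /* All territory claiming bases implicitly conflict with each other. */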
if (territory_claiming_base(pbase)) {
base_type_iterate(pbase2) {
if (pbase == pbase2) {
/* End of the fully initialized bases iteration. */
break;
}
if (territory_claiming_base(pbase2)) {
BV_SET(pbase->conflicts, base_index(pbase2));
BV_SET(pbase2->conflicts, base_index(pbase));
}
} base_type_iterate_end;
}
pbase->helptext = lookup_strvec(file, section, "helptext");
} base_type_iterate_end;
secfile_check_unused(file);
secfile_destroy(file);
}
/**************************************************************************
Load names of governments so other rulesets can refer to governments with
their name.
**************************************************************************/
static void load_government_names(struct section_file *file)
{
int nval = 0;
struct section_list *sec;
const char *filename = secfile_name(file);
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
sec = secfile_sections_by_name_prefix(file, GOVERNMENT_SECTION_PREFIX);
if (NULL == sec || 0 == (nval = section_list_size(sec))) {
ruleset_error(LOG_FATAL, "\"%s\": No governments?!?", filename);
} else if(nval > G_MAGIC) {
    /* The upper limit is really about 255 for 8-bit id values, but
       G_MAGIC is used elsewhere as a sanity check and should be plenty
       big enough --dwp */
ruleset_error(LOG_FATAL, "\"%s\": Too many governments (%d, max %d)",
filename, nval, G_MAGIC);
}
governments_alloc(nval);
/* Government names are needed early so that get_government_by_name will
* work. */
governments_iterate(gov) {
const char *sec_name =
section_name(section_list_get(sec, government_index(gov)));
ruleset_load_names(&gov->name, file, sec_name);
} governments_iterate_end;
section_list_destroy(sec);
}
/**************************************************************************
This loads information from given governments.ruleset
**************************************************************************/
static void load_ruleset_governments(struct section_file *file)
{
struct section_list *sec;
const char *filename = secfile_name(file);
(void) check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
sec = secfile_sections_by_name_prefix(file, GOVERNMENT_SECTION_PREFIX);
game.government_during_revolution
= lookup_government(file, "governments.during_revolution", filename, NULL);
game.info.government_during_revolution_id =
government_number(game.government_during_revolution);
/* easy ones: */
governments_iterate(g) {
const int i = government_index(g);
const char *sec_name = section_name(section_list_get(sec, i));
struct requirement_vector *reqs =
lookup_req_list(file, sec_name, "reqs", government_rule_name(g));
if (NULL != secfile_entry_lookup(file, "%s.ai_better", sec_name)) {
char entry[100];
fc_snprintf(entry, sizeof(entry), "%s.ai_better", sec_name);
g->ai.better = lookup_government(file, entry, filename, NULL);
} else {
g->ai.better = NULL;
}
requirement_vector_copy(&g->reqs, reqs);
sz_strlcpy(g->graphic_str,
secfile_lookup_str(file, "%s.graphic", sec_name));
sz_strlcpy(g->graphic_alt,
secfile_lookup_str(file, "%s.graphic_alt", sec_name));
g->helptext = lookup_strvec(file, sec_name, "helptext");
} governments_iterate_end;
/* titles */
governments_iterate(g) {
const char *sec_name =
section_name(section_list_get(sec, government_index(g)));
const char *male, *female;
if (!(male = secfile_lookup_str(file, "%s.ruler_male_title", sec_name))
|| !(female = secfile_lookup_str(file, "%s.ruler_female_title",
sec_name))) {
ruleset_error(LOG_FATAL, "Lack of default ruler titles for "
"government \"%s\" (nb %d): %s",
government_rule_name(g), government_number(g),
secfile_error());
} else if (NULL == government_ruler_title_new(g, NULL, male, female)) {
ruleset_error(LOG_FATAL, "Lack of default ruler titles for "
"government \"%s\" (nb %d).",
government_rule_name(g), government_number(g));
}
} governments_iterate_end;
section_list_destroy(sec);
secfile_check_unused(file);
secfile_destroy(file);
}
/**************************************************************************
Send information in packet_ruleset_control (numbers of units etc, and
other miscellany) to specified connections.
The client assumes that exactly one ruleset control packet is sent as
a part of each ruleset send. So after sending this packet we have to
resend every other part of the rulesets (and none of them should be
is-info in the network code!). The client frees ruleset data when
receiving this packet and then re-initializes as it receives the
individual ruleset packets. See packhand.c.
**************************************************************************/
static void send_ruleset_control(struct conn_list *dest)
{
struct packet_ruleset_control packet;
packet = game.control;
lsend_packet_ruleset_control(dest, &packet);
}
/****************************************************************************
  This checks that pnation's leader names are not already defined in any
  previously loaded nation, nor repeated within pnation's own leader list.
  Returns NULL if there is no conflict; otherwise returns the repeated name
  and sets *ppconflict_nation to the conflicting nation.
****************************************************************************/
static const char *check_leader_names(struct nation_type *pnation,
struct nation_type **ppconflict_nation)
{
nation_leader_list_iterate(nation_leaders(pnation), pleader) {
const char *name = nation_leader_name(pleader);
nation_leader_list_iterate(nation_leaders(pnation), prev_leader) {
if (prev_leader == pleader) {
break;
} else if (0 == fc_strcasecmp(name, nation_leader_name(prev_leader))) {
*ppconflict_nation = pnation;
return name;
}
} nation_leader_list_iterate_end;
} nation_leader_list_iterate_end;
nations_iterate(prev_nation) {
if (prev_nation == pnation) {
break;
}
nation_leader_list_iterate(nation_leaders(prev_nation), pleader) {
const char *name = nation_leader_name(pleader);
nation_leader_list_iterate(nation_leaders(prev_nation), prev_leader) {
if (prev_leader == pleader) {
break;
} else if (0 == fc_strcasecmp(name,
nation_leader_name(prev_leader))) {
*ppconflict_nation = prev_nation;
return name;
}
} nation_leader_list_iterate_end;
} nation_leader_list_iterate_end;
} nations_iterate_end;
return NULL;
}
/**************************************************************************
Load names of nations so other rulesets can refer to nations with
their name.
**************************************************************************/
static void load_nation_names(struct section_file *file)
{
struct section_list *sec;
int j;
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
sec = secfile_sections_by_name_prefix(file, NATION_SECTION_PREFIX);
if (NULL == sec) {
ruleset_error(LOG_FATAL, "No available nations in this ruleset!");
}
game.control.nation_count = section_list_size(sec);
nations_alloc(game.control.nation_count);
nations_iterate(pl) {
const int i = nation_index(pl);
const char *sec_name = section_name(section_list_get(sec, i));
const char *noun_plural = secfile_lookup_str(file,
"%s.plural", sec_name);
ruleset_load_names(&pl->adjective, file, sec_name);
name_set(&pl->noun_plural, noun_plural);
/* Check if nation name is already defined. */
for(j = 0; j < i; j++) {
struct nation_type *n2 = nation_by_number(j);
/* Compare strings after stripping off qualifiers -- we don't want
* two nations to end up with identical adjectives displayed to users.
* (This check only catches English, not localisations, of course.) */
if (0 == strcmp(Qn_(untranslated_name(&n2->adjective)),
Qn_(untranslated_name(&pl->adjective)))) {
ruleset_error(LOG_FATAL,
"Two nations defined with the same adjective \"%s\": "
"in section \'%s\' and section \'%s\'",
Qn_(untranslated_name(&pl->adjective)),
section_name(section_list_get(sec, j)), sec_name);
} else if (0 == strcmp(rule_name(&n2->adjective),
rule_name(&pl->adjective))) {
/* We cannot have the same rule name, as the game needs them to be
* distinct. */
ruleset_error(LOG_FATAL,
"Two nations defined with the same rule_name \"%s\": "
"in section \'%s\' and section \'%s\'",
rule_name(&pl->adjective),
section_name(section_list_get(sec, j)), sec_name);
} else if (0 == strcmp(Qn_(untranslated_name(&n2->noun_plural)),
Qn_(untranslated_name(&pl->noun_plural)))) {
/* We don't want identical English plural names either. */
ruleset_error(LOG_FATAL,
"Two nations defined with the same plural name \"%s\": "
"in section \'%s\' and section \'%s\'",
Qn_(untranslated_name(&pl->noun_plural)),
section_name(section_list_get(sec, j)), sec_name);
}
}
} nations_iterate_end;
section_list_destroy(sec);
}
/****************************************************************************
  This function loads a city name list from a section file. The file and
  two section names (which will be concatenated) are passed in. Each parsed
  city name is added to the given nation via nation_city_new().
****************************************************************************/
static void load_city_name_list(struct section_file *file,
struct nation_type *pnation,
const char *secfile_str1,
const char *secfile_str2)
{
size_t dim, j;
const char **cities = secfile_lookup_str_vec(file, &dim, "%s.%s",
secfile_str1, secfile_str2);
/* Each string will be of the form "<cityname> (<label>, <label>, ...)".
* The cityname is just the name for this city, while each "label" matches
   * a terrain type for the city (or "river"), with a preceding ! to negate
* it. The parentheses are optional (but necessary to have the settings,
* of course). Our job is now to parse it. */
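  /* e.g. a hypothetical entry "Porto (ocean, river, !mountains)" prefers
   * ocean and river tiles and dislikes mountains. */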
for (j = 0; j < dim; j++) {
size_t len = strlen(cities[j]);
char city_name[len + 1], *p, *next, *end;
struct nation_city *pncity;
sz_strlcpy(city_name, cities[j]);
    /* Now we wish to determine values for all of the city labels. A value
     * of NCP_NONE means no preference (which is necessary so that the use
     * of this is optional); NCP_DISLIKE means the label is negated and
     * NCP_LIKE means it is liked. Mostly the parsing just involves
     * a lot of ugly string handling... */
if ((p = strchr(city_name, '('))) {
*p++ = '\0';
if (!(end = strchr(p, ')'))) {
ruleset_error(LOG_ERROR, "\"%s\" [%s] %s: city name \"%s\" "
"unmatched parenthesis.", secfile_name(file),
secfile_str1, secfile_str2, cities[j]);
}
for (*end++ = '\0'; '\0' != *end; end++) {
if (!fc_isspace(*end)) {
          ruleset_error(LOG_ERROR, "\"%s\" [%s] %s: city name \"%s\" "
                        "contains characters after last parenthesis, "
                        "ignoring...", secfile_name(file), secfile_str1,
                        secfile_str2, cities[j]);
}
}
}
/* Build the nation_city. */
remove_leading_trailing_spaces(city_name);
if (check_name(city_name)) {
/* The ruleset contains a name that is too long. This shouldn't
* happen - if it does, the author should get immediate feedback. */
ruleset_error(LOG_ERROR, "\"%s\" [%s] %s: city name \"%s\" "
"is too long; shortening it.", secfile_name(file),
secfile_str1, secfile_str2, city_name);
city_name[MAX_LEN_NAME - 1] = '\0';
}
pncity = nation_city_new(pnation, city_name);
if (NULL != p) {
/* Handle the labels one at a time. */
do {
enum nation_city_preference prefer;
if ((next = strchr(p, ','))) {
*next = '\0';
}
remove_leading_trailing_spaces(p);
/* The ! is used to mark a negative, which is recorded with
* NCP_DISLIKE. Otherwise we use a NCP_LIKE.
*/
if (*p == '!') {
p++;
prefer = NCP_DISLIKE;
} else {
prefer = NCP_LIKE;
}
if (0 == fc_strcasecmp(p, "river")) {
nation_city_set_river_preference(pncity, prefer);
} else {
const struct terrain *pterrain = terrain_by_rule_name(p);
if (NULL == pterrain) {
/* Try with removing frequent trailing 's'. */
size_t l = strlen(p);
if (0 < l && 's' == fc_tolower(p[l - 1])) {
p[l - 1] = '\0';
}
pterrain = terrain_by_rule_name(p);
}
if (NULL != pterrain) {
nation_city_set_terrain_preference(pncity, pterrain, prefer);
} else {
/* Nation authors may use terrains like "lake" that are
* available in the default ruleset but not in civ1/civ2.
* In normal use we should just ignore hints for unknown
* terrains, but nation authors may want to know about this
* to spot typos etc. */
log_verbose("\"%s\" [%s] %s: terrain \"%s\" not found;"
" skipping it.",
secfile_name(file), secfile_str1, secfile_str2, p);
}
}
p = next ? next + 1 : NULL;
} while (NULL != p && '\0' != *p);
}
}
if (NULL != cities) {
free(cities);
}
}
/**************************************************************************
Load nations.ruleset file
**************************************************************************/
static void load_ruleset_nations(struct section_file *file)
{
struct government *gov;
int j;
size_t dim;
char temp_name[MAX_LEN_NAME];
const char **vec;
const char *name, *bad_leader;
int barb_land_count = 0;
int barb_sea_count = 0;
bool warn_city_style;
const char *sval;
struct government *default_government = NULL;
const char *filename = secfile_name(file);
struct section_list *sec;
(void) check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
warn_city_style
= secfile_lookup_bool_default(file, TRUE,
"compatibility.warn_city_style");
sval = secfile_lookup_str_default(file, NULL,
"compatibility.default_government");
if (sval != NULL) {
default_government = government_by_rule_name(sval);
}
set_allowed_nation_groups(NULL);
sec = secfile_sections_by_name_prefix(file, NATION_SET_SECTION_PREFIX);
if (sec) {
section_list_iterate(sec, psection) {
struct nation_group *pset;
name = secfile_lookup_str(file, "%s.name", section_name(psection));
if (NULL == name) {
ruleset_error(LOG_FATAL, "Error: %s", secfile_error());
}
pset = nation_group_new(name);
nation_group_set_set(pset, TRUE);
} section_list_iterate_end;
section_list_destroy(sec);
}
sec = secfile_sections_by_name_prefix(file, NATION_GROUP_SECTION_PREFIX);
section_list_iterate(sec, psection) {
struct nation_group *pgroup;
name = secfile_lookup_str(file, "%s.name", section_name(psection));
if (NULL == name) {
ruleset_error(LOG_FATAL, "Error: %s", secfile_error());
}
pgroup = nation_group_new(name);
if (!secfile_lookup_int(file, &j, "%s.match", section_name(psection))) {
ruleset_error(LOG_FATAL, "Error: %s", secfile_error());
}
nation_group_set_match(pgroup, j);
} section_list_iterate_end;
section_list_destroy(sec);
sec = secfile_sections_by_name_prefix(file, NATION_SECTION_PREFIX);
nations_iterate(pnation) {
struct nation_type *pconflict;
const int i = nation_index(pnation);
char tmp[200] = "\0";
const char *barb_type;
const char *sec_name = section_name(section_list_get(sec, i));
/* Nation groups. */
vec = secfile_lookup_str_vec(file, &dim, "%s.groups", sec_name);
for (j = 0; j < dim; j++) {
struct nation_group *pgroup = nation_group_by_rule_name(vec[j]);
if (NULL != pgroup) {
nation_group_list_append(pnation->groups, pgroup);
} else {
/* For nation authors, this would probably be considered an error.
* But it can happen normally. The civ1 compatibility ruleset only
* uses the nations that were in civ1, so not all of the links will
* exist. */
log_verbose("Nation %s: Unknown group \"%s\".",
nation_rule_name(pnation), vec[j]);
}
}
if (NULL != vec) {
free(vec);
}
/* Nation conflicts. */
vec = secfile_lookup_str_vec(file, &dim, "%s.conflicts_with", sec_name);
for (j = 0; j < dim; j++) {
pconflict = nation_by_rule_name(vec[j]);
if (pnation == pconflict) {
ruleset_error(LOG_ERROR, "Nation %s conflicts with itself",
nation_rule_name(pnation));
} else if (NULL != pconflict) {
nation_list_append(pnation->server.conflicts_with, pconflict);
} else {
/* For nation authors, this would probably be considered an error.
* But it can happen normally. The civ1 compatibility ruleset only
* uses the nations that were in civ1, so not all of the links will
* exist. */
log_verbose("Nation %s: conflicts_with nation \"%s\" is unknown.",
nation_rule_name(pnation), vec[j]);
}
}
if (NULL != vec) {
free(vec);
}
/* Nation leaders. */
for (j = 0; j < MAX_NUM_LEADERS; j++) {
const char *sex;
bool is_male = FALSE;
name = secfile_lookup_str(file, "%s.leaders%d.name", sec_name, j);
if (NULL == name) {
/* No more to read. */
break;
}
if (check_name(name)) {
/* The ruleset contains a name that is too long. This shouldn't
* happen - if it does, the author should get immediate feedback */
sz_strlcpy(temp_name, name);
ruleset_error(LOG_ERROR, "Nation %s: leader name \"%s\" "
"is too long; shortening it to \"%s\".",
nation_rule_name(pnation), name, temp_name);
name = temp_name;
}
sex = secfile_lookup_str(file, "%s.leaders%d.sex", sec_name, j);
if (NULL == sex) {
ruleset_error(LOG_FATAL, "Nation %s: leader \"%s\": %s.",
nation_rule_name(pnation), name, secfile_error());
} else if (0 == fc_strcasecmp("Male", sex)) {
is_male = TRUE;
} else if (0 != fc_strcasecmp("Female", sex)) {
ruleset_error(LOG_FATAL, "Nation %s: leader \"%s\" has unsupported "
"sex variant \"%s\".",
nation_rule_name(pnation), name, sex);
}
(void) nation_leader_new(pnation, name, is_male);
}
/* Check the number of leaders. */
if (MAX_NUM_LEADERS == j) {
      /* Too many leaders; count how many are actually defined in the ruleset. */
while (NULL != secfile_entry_lookup(file, "%s.leaders%d.name",
sec_name, j)) {
j++;
}
log_error("Nation %s: Too many leaders; using %d of %d",
nation_rule_name(pnation), MAX_NUM_LEADERS, j);
} else if (0 == j) {
ruleset_error(LOG_FATAL,
"Nation %s: no leaders; at least one is required.",
nation_rule_name(pnation));
}
/* Check if leader name is not already defined */
if ((bad_leader = check_leader_names(pnation, &pconflict))) {
if (pnation == pconflict) {
ruleset_error(LOG_FATAL,
"Nation %s: leader \"%s\" defined more than once.",
nation_rule_name(pnation), bad_leader);
} else {
ruleset_error(LOG_FATAL,
"Nations %s and %s share the same leader \"%s\".",
nation_rule_name(pnation), nation_rule_name(pconflict),
bad_leader);
}
}
pnation->is_available =
secfile_lookup_bool_default(file, TRUE, "%s.is_available", sec_name);
pnation->is_playable =
secfile_lookup_bool_default(file, TRUE, "%s.is_playable", sec_name);
if (pnation->is_playable) {
server.playable_nations++;
}
/* Check barbarian type. Default is "None" meaning not a barbarian */
barb_type = secfile_lookup_str_default(file, "None",
"%s.barbarian_type", sec_name);
if (fc_strcasecmp(barb_type, "None") == 0) {
pnation->barb_type = NOT_A_BARBARIAN;
} else if (fc_strcasecmp(barb_type, "Land") == 0) {
if (pnation->is_playable) {
/* We can't allow players to use barbarian nations, barbarians
* may run out of nations */
ruleset_error(LOG_FATAL,
"Nation %s marked both barbarian and playable.",
nation_rule_name(pnation));
}
pnation->barb_type = LAND_BARBARIAN;
barb_land_count++;
} else if (fc_strcasecmp(barb_type, "Sea") == 0) {
if (pnation->is_playable) {
/* We can't allow players to use barbarian nations, barbarians
* may run out of nations */
ruleset_error(LOG_FATAL,
"Nation %s marked both barbarian and playable.",
nation_rule_name(pnation));
}
pnation->barb_type = SEA_BARBARIAN;
barb_sea_count++;
} else {
ruleset_error(LOG_FATAL,
"Nation %s, barbarian_type is \"%s\". Must be "
"\"None\" or \"Land\" or \"Sea\".",
nation_rule_name(pnation), barb_type);
}
/* Flags */
sz_strlcpy(pnation->flag_graphic_str,
secfile_lookup_str_default(file, "-", "%s.flag", sec_name));
sz_strlcpy(pnation->flag_graphic_alt,
secfile_lookup_str_default(file, "-",
"%s.flag_alt", sec_name));
/* Ruler titles */
for (j = 0;; j++) {
const char *male, *female;
name = secfile_lookup_str_default(file, NULL,
"%s.ruler_titles%d.government",
sec_name, j);
if (NULL == name) {
/* End of the list of ruler titles. */
break;
}
/* NB: even if the government doesn't exist, we load the entries for
* the ruler titles to avoid warnings about unused entries. */
male = secfile_lookup_str(file, "%s.ruler_titles%d.male_title",
sec_name, j);
female = secfile_lookup_str(file, "%s.ruler_titles%d.female_title",
sec_name, j);
gov = government_by_rule_name(name);
if (NULL == gov) {
        /* log_verbose() rather than log_error() so that a single nation
         * ruleset file can be used with a variety of government ruleset
         * files: */
log_verbose("Nation %s: government \"%s\" not found.",
nation_rule_name(pnation), name);
} else if (NULL != male && NULL != female) {
(void) government_ruler_title_new(gov, pnation, male, female);
} else {
ruleset_error(LOG_ERROR, "%s", secfile_error());
}
}
/* City styles */
name = secfile_lookup_str(file, "%s.city_style", sec_name);
pnation->city_style = city_style_by_rule_name(name);
if (0 > pnation->city_style) {
if (warn_city_style) {
log_error("Nation %s: city style \"%s\" is unknown, using default.",
nation_rule_name(pnation), name);
}
pnation->city_style = 0;
}
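    /* The chosen city style must be usable from game start; fall back to
     * the default style (index 0) if it has requirements. */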
while (city_style_has_requirements(city_styles + pnation->city_style)) {
if (pnation->city_style == 0) {
ruleset_error(LOG_FATAL,
"Nation %s: the default city style is not available "
"from the beginning!", nation_rule_name(pnation));
/* Note that we can't use temp_name here. */
}
log_error("Nation %s: city style \"%s\" is not available "
"from beginning; using default.",
nation_rule_name(pnation), name);
pnation->city_style = 0;
}
/* Civilwar nations */
vec = secfile_lookup_str_vec(file, &dim,
"%s.civilwar_nations", sec_name);
for (j = 0; j < dim; j++) {
pconflict = nation_by_rule_name(vec[j]);
/* No test for duplicate nations is performed. If there is a duplicate
* entry it will just cause that nation to have an increased
* probability of being chosen. */
if (pconflict == pnation) {
ruleset_error(LOG_ERROR, "Nation %s is its own civil war nation",
nation_rule_name(pnation));
} else if (NULL != pconflict) {
nation_list_append(pnation->server.civilwar_nations, pconflict);
nation_list_append(pconflict->server.parent_nations, pnation);
} else {
/* For nation authors, this would probably be considered an error.
         * But it can happen normally. The civ1 compatibility ruleset only
* uses the nations that were in civ1, so not all of the links will
* exist. */
log_verbose("Nation %s: civil war nation \"%s\" is unknown.",
nation_rule_name(pnation), vec[j]);
}
}
if (NULL != vec) {
free(vec);
}
/* Load nation specific initial items */
lookup_tech_list(file, sec_name, "init_techs",
pnation->init_techs, filename);
lookup_building_list(file, sec_name, "init_buildings",
pnation->init_buildings, filename);
lookup_unit_list(file, sec_name, "init_units", LOG_ERROR,
pnation->init_units, filename);
fc_strlcat(tmp, sec_name, 200);
fc_strlcat(tmp, ".init_government", 200);
pnation->init_government = lookup_government(file, tmp, filename,
default_government);
/* Read default city names. */
load_city_name_list(file, pnation, sec_name, "cities");
pnation->legend = fc_strdup(secfile_lookup_str(file, "%s.legend",
sec_name));
if (check_strlen(pnation->legend, MAX_LEN_MSG, NULL)) {
ruleset_error(LOG_ERROR,
"Nation %s: legend \"%s\" is too long;"
" shortening it.", nation_rule_name(pnation),
pnation->legend);
pnation->legend[MAX_LEN_MSG - 1] = '\0';
}
pnation->player = NULL;
} nations_iterate_end;
section_list_destroy(sec);
secfile_check_unused(file);
secfile_destroy(file);
if (barb_land_count == 0) {
ruleset_error(LOG_FATAL,
"No land barbarian nation defined. At least one required!");
}
if (barb_sea_count == 0) {
ruleset_error(LOG_FATAL,
"No sea barbarian nation defined. At least one required!");
}
}
/**************************************************************************
Load names of city styles so other rulesets can refer to city styles with
their name.
**************************************************************************/
static void load_citystyle_names(struct section_file *file)
{
struct section_list *styles;
int i = 0;
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
/* The sections: */
styles = secfile_sections_by_name_prefix(file, CITYSTYLE_SECTION_PREFIX);
if (NULL != styles) {
city_styles_alloc(section_list_size(styles));
section_list_iterate(styles, style) {
ruleset_load_names(&city_styles[i].name, file, section_name(style));
i++;
} section_list_iterate_end;
section_list_destroy(styles);
} else {
city_styles_alloc(0);
}
}
/**************************************************************************
Load cities.ruleset file
**************************************************************************/
static void load_ruleset_cities(struct section_file *file)
{
const char *replacement;
int i;
const char *filename = secfile_name(file);
const char *item;
struct section_list *sec;
(void) check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
/* Specialist options */
sec = secfile_sections_by_name_prefix(file, SPECIALIST_SECTION_PREFIX);
if (section_list_size(sec) >= SP_MAX) {
ruleset_error(LOG_FATAL, "\"%s\": Too many specialists (%d, max %d).",
filename, section_list_size(sec), SP_MAX);
}
game.control.num_specialist_types = section_list_size(sec);
i = 0;
section_list_iterate(sec, psection) {
struct specialist *s = specialist_by_number(i);
struct requirement_vector *reqs;
const char *sec_name = section_name(psection);
ruleset_load_names(&s->name, file, sec_name);
item = secfile_lookup_str_default(file, untranslated_name(&s->name),
"%s.short_name", sec_name);
name_set(&s->abbreviation, item);
reqs = lookup_req_list(file, sec_name, "reqs", specialist_rule_name(s));
requirement_vector_copy(&s->reqs, reqs);
s->helptext = lookup_strvec(file, sec_name, "helptext");
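    /* The first specialist defined with no requirements becomes the
     * default specialist. */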
if (requirement_vector_size(&s->reqs) == 0 && DEFAULT_SPECIALIST == -1) {
DEFAULT_SPECIALIST = i;
}
i++;
} section_list_iterate_end;
if (DEFAULT_SPECIALIST == -1) {
    ruleset_error(LOG_FATAL,
                  "\"%s\": at least one specialist type must have no "
                  "requirements (be available from the start).", filename);
}
section_list_destroy(sec);
/* City Parameters */
game.info.celebratesize =
secfile_lookup_int_default(file, GAME_DEFAULT_CELEBRATESIZE,
"parameters.celebrate_size_limit");
game.info.add_to_size_limit =
secfile_lookup_int_default(file, 9, "parameters.add_to_size_limit");
game.info.angrycitizen =
secfile_lookup_bool_default(file, GAME_DEFAULT_ANGRYCITIZEN,
"parameters.angry_citizens");
game.info.changable_tax =
secfile_lookup_bool_default(file, TRUE, "parameters.changable_tax");
game.info.forced_science =
secfile_lookup_int_default(file, 0, "parameters.forced_science");
game.info.forced_luxury =
secfile_lookup_int_default(file, 100, "parameters.forced_luxury");
game.info.forced_gold =
secfile_lookup_int_default(file, 0, "parameters.forced_gold");
if (game.info.forced_science + game.info.forced_luxury
+ game.info.forced_gold != 100) {
ruleset_error(LOG_FATAL,
"\"%s\": Forced taxes do not add up in ruleset!",
filename);
}
/* civ1 & 2 didn't reveal tiles */
game.server.vision_reveal_tiles =
secfile_lookup_bool_default(file, FALSE, "parameters.vision_reveal_tiles");
/* Citizens configuration. */
game.info.citizen_nationality =
secfile_lookup_bool_default(file, FALSE,
"citizen.nationality");
/* City Styles ... */
sec = secfile_sections_by_name_prefix(file, CITYSTYLE_SECTION_PREFIX);
  /* Load the rest of each city style's fields: */
for (i = 0; i < game.control.styles_count; i++) {
struct requirement_vector *reqs;
const char *sec_name = section_name(section_list_get(sec, i));
sz_strlcpy(city_styles[i].graphic,
secfile_lookup_str(file, "%s.graphic", sec_name));
sz_strlcpy(city_styles[i].graphic_alt,
secfile_lookup_str(file, "%s.graphic_alt", sec_name));
sz_strlcpy(city_styles[i].oceanic_graphic,
secfile_lookup_str_default(file, "",
"%s.oceanic_graphic", sec_name));
sz_strlcpy(city_styles[i].oceanic_graphic_alt,
secfile_lookup_str_default(file, "",
"%s.oceanic_graphic_alt",
sec_name));
sz_strlcpy(city_styles[i].citizens_graphic,
secfile_lookup_str_default(file, "-",
"%s.citizens_graphic", sec_name));
sz_strlcpy(city_styles[i].citizens_graphic_alt,
secfile_lookup_str_default(file, "generic",
"%s.citizens_graphic_alt", sec_name));
reqs = lookup_req_list(file, sec_name, "reqs", city_style_rule_name(i));
requirement_vector_copy(&city_styles[i].reqs, reqs);
replacement = secfile_lookup_str(file, "%s.replaced_by", sec_name);
if(0 == strcmp(replacement, "-")) {
city_styles[i].replaced_by = -1;
} else {
city_styles[i].replaced_by = city_style_by_rule_name(replacement);
if (city_styles[i].replaced_by < 0) {
log_error("\"%s\": style \"%s\" replacement \"%s\" not found",
filename, city_style_rule_name(i), replacement);
}
}
}
section_list_destroy(sec);
secfile_check_unused(file);
secfile_destroy(file);
}
/**************************************************************************
Load effects.ruleset file
**************************************************************************/
static void load_ruleset_effects(struct section_file *file)
{
struct section_list *sec;
const char *type;
const char *filename;
filename = secfile_name(file);
(void) check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
/* Parse effects and add them to the effects ruleset cache. */
sec = secfile_sections_by_name_prefix(file, EFFECT_SECTION_PREFIX);
section_list_iterate(sec, psection) {
enum effect_type eff;
int value;
struct effect *peffect;
const char *sec_name = section_name(psection);
type = secfile_lookup_str(file, "%s.name", sec_name);
if (!type) {
log_error("\"%s\" [%s] missing effect name.", filename, sec_name);
continue;
}
eff = effect_type_by_name(type, fc_strcasecmp);
if (!effect_type_is_valid(eff)) {
log_error("\"%s\" [%s] lists unknown effect type \"%s\".",
filename, sec_name, type);
continue;
}
value = secfile_lookup_int_default(file, 1, "%s.value", sec_name);
peffect = effect_new(eff, value);
requirement_vector_iterate(lookup_req_list(file, sec_name, "reqs", type),
req) {
struct requirement *preq = fc_malloc(sizeof(*preq));
*preq = *req;
effect_req_append(peffect, FALSE, preq);
} requirement_vector_iterate_end;
requirement_vector_iterate(lookup_req_list(file, sec_name,
"nreqs", type),
req) {
struct requirement *preq = fc_malloc(sizeof(*preq));
*preq = *req;
effect_req_append(peffect, TRUE, preq);
} requirement_vector_iterate_end;
} section_list_iterate_end;
section_list_destroy(sec);
secfile_check_unused(file);
secfile_destroy(file);
}
/**************************************************************************
  Look up an integer value in the secfile, using the given default if the
  entry is missing, and clamp the result to [min, max], printing an error
  message if it is out of range.
**************************************************************************/
static int secfile_lookup_int_default_min_max(struct section_file *file,
int def, int min, int max,
const char *path, ...)
fc__attribute((__format__ (__printf__, 5, 6)));
static int secfile_lookup_int_default_min_max(struct section_file *file,
int def, int min, int max,
const char *path, ...)
{
char fullpath[256];
int ival;
va_list args;
va_start(args, path);
fc_vsnprintf(fullpath, sizeof(fullpath), path, args);
va_end(args);
if (!secfile_lookup_int(file, &ival, "%s", fullpath)) {
ival = def;
}
if (ival < min) {
ruleset_error(LOG_ERROR,"\"%s\" should be in the interval [%d, %d] "
"but is %d; using the minimal value.",
fullpath, min, max, ival);
ival = min;
}
if (ival > max) {
ruleset_error(LOG_ERROR,"\"%s\" should be in the interval [%d, %d] "
"but is %d; using the maximal value.",
fullpath, min, max, ival);
ival = max;
}
return ival;
}
/**************************************************************************
Load ruleset file.
**************************************************************************/
static void load_ruleset_game(void)
{
struct section_file *file;
const char *sval, **svec;
const char *filename;
int *food_ini;
int i;
size_t teams;
const char *text;
size_t gni_tmp;
file = openload_ruleset_file("game");
filename = secfile_name(file);
/* section: datafile */
(void) check_ruleset_capabilities(file, RULESET_CAPABILITIES, filename);
(void) secfile_entry_by_path(file, "datafile.description"); /* unused */
/* section: tileset */
text = secfile_lookup_str_default(file, "", "tileset.prefered");
if (text[0] != '\0') {
    /* There was a tileset suggestion */
sz_strlcpy(game.control.prefered_tileset, text);
} else {
/* No tileset suggestions */
game.control.prefered_tileset[0] = '\0';
}
/* section: about */
text = secfile_lookup_str(file, "about.name");
/* Ruleset/modpack name found */
sz_strlcpy(game.control.name, text);
text = secfile_lookup_str_default(file, "", "about.description");
if (text[0] != '\0') {
/* Ruleset/modpack description found */
sz_strlcpy(game.control.description, text);
} else {
/* No description */
game.control.description[0] = '\0';
}
/* section: options */
lookup_tech_list(file, "options", "global_init_techs",
game.rgame.global_init_techs, filename);
lookup_building_list(file, "options", "global_init_buildings",
game.rgame.global_init_buildings, filename);
/* section: civstyle */
game.info.base_pollution
= secfile_lookup_int_default(file, RS_DEFAULT_BASE_POLLUTION,
"civstyle.base_pollution");
game.info.happy_cost
    = secfile_lookup_int_default_min_max(file,
                                         RS_DEFAULT_HAPPY_COST,
                                         RS_MIN_HAPPY_COST,
                                         RS_MAX_HAPPY_COST,
                                         "civstyle.happy_cost");
game.info.food_cost
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_FOOD_COST,
RS_MIN_FOOD_COST,
RS_MAX_FOOD_COST,
"civstyle.food_cost");
/* TODO: move to global_unit_options */
game.info.base_bribe_cost
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_BASE_BRIBE_COST,
RS_MIN_BASE_BRIBE_COST,
RS_MAX_BASE_BRIBE_COST,
"civstyle.base_bribe_cost");
/* TODO: move to global_unit_options */
game.server.ransom_gold
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_RANSOM_GOLD,
RS_MIN_RANSOM_GOLD,
RS_MAX_RANSOM_GOLD,
"civstyle.ransom_gold");
/* TODO: move to global_unit_options */
game.info.pillage_select
= secfile_lookup_bool_default(file, RS_DEFAULT_PILLAGE_SELECT,
"civstyle.pillage_select");
/* TODO: move to global_unit_options */
game.server.upgrade_veteran_loss
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_UPGRADE_VETERAN_LOSS,
RS_MIN_UPGRADE_VETERAN_LOSS,
RS_MAX_UPGRADE_VETERAN_LOSS,
"civstyle.upgrade_veteran_loss");
/* TODO: move to global_unit_options */
game.server.autoupgrade_veteran_loss
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_UPGRADE_VETERAN_LOSS,
RS_MIN_UPGRADE_VETERAN_LOSS,
RS_MAX_UPGRADE_VETERAN_LOSS,
"civstyle.autoupgrade_veteran_loss");
/* TODO: move to new section research */
game.info.base_tech_cost
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_BASE_TECH_COST,
RS_MIN_BASE_TECH_COST,
RS_MAX_BASE_TECH_COST,
"civstyle.base_tech_cost");
food_ini = secfile_lookup_int_vec(file, &gni_tmp,
"civstyle.granary_food_ini");
game.info.granary_num_inis = (int) gni_tmp;
if (game.info.granary_num_inis > MAX_GRANARY_INIS) {
ruleset_error(LOG_FATAL,
"Too many granary_food_ini entries (%d, max %d)",
game.info.granary_num_inis, MAX_GRANARY_INIS);
} else if (game.info.granary_num_inis == 0) {
log_error("No values for granary_food_ini. Using default "
"value %d.", RS_DEFAULT_GRANARY_FOOD_INI);
game.info.granary_num_inis = 1;
game.info.granary_food_ini[0] = RS_DEFAULT_GRANARY_FOOD_INI;
} else {
int i;
/* check for <= 0 entries */
for (i = 0; i < game.info.granary_num_inis; i++) {
if (food_ini[i] <= 0) {
if (i == 0) {
food_ini[i] = RS_DEFAULT_GRANARY_FOOD_INI;
} else {
food_ini[i] = food_ini[i - 1];
}
log_error("Bad value for granary_food_ini[%i]. Using %i.",
i, food_ini[i]);
}
game.info.granary_food_ini[i] = food_ini[i];
}
}
free(food_ini);
game.info.granary_food_inc
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_GRANARY_FOOD_INC,
RS_MIN_GRANARY_FOOD_INC,
RS_MAX_GRANARY_FOOD_INC,
"civstyle.granary_food_inc");
output_type_iterate(o) {
game.info.min_city_center_output[o]
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_CITY_CENTER_OUTPUT,
RS_MIN_CITY_CENTER_OUTPUT,
RS_MAX_CITY_CENTER_OUTPUT,
"civstyle.min_city_center_%s",
get_output_identifier(o));
} output_type_iterate_end;
sval = secfile_lookup_str(file, "civstyle.nuke_contamination" );
if (fc_strcasecmp(sval, "Pollution") == 0) {
game.server.nuke_contamination = CONTAMINATION_POLLUTION;
} else if (fc_strcasecmp(sval, "Fallout") == 0) {
game.server.nuke_contamination = CONTAMINATION_FALLOUT;
} else {
log_error("Bad value %s for nuke_contamination. Using "
"\"Pollution\".", sval);
game.server.nuke_contamination = CONTAMINATION_POLLUTION;
}
game.server.init_vis_radius_sq
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_VIS_RADIUS_SQ,
RS_MIN_VIS_RADIUS_SQ,
RS_MAX_VIS_RADIUS_SQ,
"civstyle.init_vis_radius_sq");
game.info.init_city_radius_sq
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_CITY_RADIUS_SQ,
RS_MIN_CITY_RADIUS_SQ,
RS_MAX_CITY_RADIUS_SQ,
"civstyle.init_city_radius_sq");
game.info.gold_upkeep_style
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_GOLD_UPKEEP_STYLE,
RS_MIN_GOLD_UPKEEP_STYLE,
RS_MAX_GOLD_UPKEEP_STYLE,
"civstyle.gold_upkeep_style");
/* TODO: move to new section research */
game.info.tech_cost_style
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_TECH_COST_STYLE,
RS_MIN_TECH_COST_STYLE,
RS_MAX_TECH_COST_STYLE,
"civstyle.tech_cost_style");
/* TODO: move to new section research */
game.info.tech_leakage
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_TECH_LEAKAGE,
RS_MIN_TECH_LEAKAGE,
RS_MAX_TECH_LEAKAGE,
"civstyle.tech_leakage");
if (game.info.tech_cost_style == 0 && game.info.tech_leakage != 0) {
log_error("Only tech_leakage 0 supported with tech_cost_style 0.");
log_error("Switching to tech_leakage 0.");
game.info.tech_leakage = 0;
}
/* section: illness */
game.info.illness_on
= secfile_lookup_bool_default(file, RS_DEFAULT_ILLNESS_ON,
"illness.illness_on");
game.info.illness_base_factor
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_ILLNESS_BASE_FACTOR,
RS_MIN_ILLNESS_BASE_FACTOR,
RS_MAX_ILLNESS_BASE_FACTOR,
"illness.illness_base_factor");
game.info.illness_min_size
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_ILLNESS_MIN_SIZE,
RS_MIN_ILLNESS_MIN_SIZE,
RS_MAX_ILLNESS_MIN_SIZE,
"illness.illness_min_size");
game.info.illness_trade_infection
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_ILLNESS_TRADE_INFECTION_PCT,
RS_MIN_ILLNESS_TRADE_INFECTION_PCT,
RS_MAX_ILLNESS_TRADE_INFECTION_PCT,
"illness.illness_trade_infection");
game.info.illness_pollution_factor
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_ILLNESS_POLLUTION_PCT,
RS_MIN_ILLNESS_POLLUTION_PCT,
RS_MAX_ILLNESS_POLLUTION_PCT,
"illness.illness_pollution_factor");
/* section: incite_cost */
game.server.base_incite_cost
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_INCITE_BASE_COST,
RS_MIN_INCITE_BASE_COST,
RS_MAX_INCITE_BASE_COST,
"incite_cost.base_incite_cost");
game.server.incite_improvement_factor
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_INCITE_IMPROVEMENT_FCT,
RS_MIN_INCITE_IMPROVEMENT_FCT,
RS_MAX_INCITE_IMPROVEMENT_FCT,
"incite_cost.improvement_factor");
game.server.incite_unit_factor
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_INCITE_UNIT_FCT,
RS_MIN_INCITE_UNIT_FCT,
RS_MAX_INCITE_UNIT_FCT,
"incite_cost.unit_factor");
game.server.incite_total_factor
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_INCITE_TOTAL_FCT,
RS_MIN_INCITE_TOTAL_FCT,
RS_MAX_INCITE_TOTAL_FCT,
"incite_cost.total_factor");
/* section: global_unit_options */
game.info.slow_invasions
= secfile_lookup_bool_default(file, RS_DEFAULT_SLOW_INVASIONS,
"global_unit_options.slow_invasions");
/* section: combat_rules */
game.info.killstack
= secfile_lookup_bool_default(file, RS_DEFAULT_KILLSTACK,
"combat_rules.killstack");
game.info.tired_attack
= secfile_lookup_bool_default(file, RS_DEFAULT_TIRED_ATTACK,
"combat_rules.tired_attack");
/* section: borders */
game.info.border_city_radius_sq
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_BORDER_RADIUS_SQ_CITY,
RS_MIN_BORDER_RADIUS_SQ_CITY,
RS_MAX_BORDER_RADIUS_SQ_CITY,
"borders.radius_sq_city");
game.info.border_size_effect
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_BORDER_SIZE_EFFECT,
RS_MIN_BORDER_SIZE_EFFECT,
RS_MAX_BORDER_SIZE_EFFECT,
"borders.size_effect");
/* section: research */
game.info.tech_upkeep_style
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_TECH_UPKEEP_STYLE,
RS_MIN_TECH_UPKEEP_STYLE,
RS_MAX_TECH_UPKEEP_STYLE,
"research.tech_upkeep_style");
game.info.tech_upkeep_divider
= secfile_lookup_int_default_min_max(file,
RS_DEFAULT_TECH_UPKEEP_DIVIDER,
RS_MIN_TECH_UPKEEP_DIVIDER,
RS_MAX_TECH_UPKEEP_DIVIDER,
"research.tech_upkeep_divider");
/* section: calendar */
game.info.calendar_skip_0
= secfile_lookup_bool_default(file, RS_DEFAULT_CALENDAR_SKIP_0,
"calendar.skip_year_0");
game.server.start_year
= secfile_lookup_int_default(file, GAME_START_YEAR,
"calendar.start_year");
sz_strlcpy(game.info.positive_year_label,
_(secfile_lookup_str_default(file,
RS_DEFAULT_POS_YEAR_LABEL,
"calendar.positive_label")));
sz_strlcpy(game.info.negative_year_label,
_(secfile_lookup_str_default(file,
RS_DEFAULT_NEG_YEAR_LABEL,
"calendar.negative_label")));
/* section playercolors */
{
struct rgbcolor *prgbcolor = NULL;
bool read = TRUE;
    /* Check that the player color list is still empty before loading. */
fc_assert_ret(playercolor_count() == 0);
i = 0;
while (read) {
prgbcolor = NULL;
read = rgbcolor_load(file, &prgbcolor, "playercolors.colorlist%d", i);
if (read) {
playercolor_add(prgbcolor);
}
i++;
}
if (playercolor_count() == 0) {
ruleset_error(LOG_FATAL, "No player colors defined!");
}
if (game.plr_bg_color != NULL) {
rgbcolor_destroy(game.plr_bg_color);
game.plr_bg_color = NULL;
}
if (!rgbcolor_load(file, &game.plr_bg_color, "playercolors.background")) {
ruleset_error(LOG_FATAL, "No background player color defined! (%s)",
secfile_error());
}
}
/* section: teams */
svec = secfile_lookup_str_vec(file, &teams, "teams.names");
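  /* Don't try to set names for more teams than there are team slots. */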
if (team_slot_count() < teams) {
teams = team_slot_count();
}
for (i = 0; i < teams; i++) {
team_slot_set_defined_name(team_slot_by_number(i), svec[i]);
}
free(svec);
settings_ruleset(file, "settings");
secfile_check_unused(file);
secfile_destroy(file);
}
/**************************************************************************
Send the units ruleset information (all individual unit classes) to the
specified connections.
**************************************************************************/
static void send_ruleset_unit_classes(struct conn_list *dest)
{
struct packet_ruleset_unit_class packet;
unit_class_iterate(c) {
packet.id = uclass_number(c);
sz_strlcpy(packet.name, untranslated_name(&c->name));
sz_strlcpy(packet.rule_name, rule_name(&c->name));
packet.move_type = c->move_type;
packet.min_speed = c->min_speed;
packet.hp_loss_pct = c->hp_loss_pct;
packet.hut_behavior = c->hut_behavior;
packet.flags = c->flags;
lsend_packet_ruleset_unit_class(dest, &packet);
} unit_class_iterate_end;
}
/**************************************************************************
Send the units ruleset information (all individual units) to the
specified connections.
**************************************************************************/
static void send_ruleset_units(struct conn_list *dest)
{
struct packet_ruleset_unit packet;
int i;
unit_type_iterate(u) {
packet.id = utype_number(u);
sz_strlcpy(packet.name, untranslated_name(&u->name));
sz_strlcpy(packet.rule_name, rule_name(&u->name));
sz_strlcpy(packet.sound_move, u->sound_move);
sz_strlcpy(packet.sound_move_alt, u->sound_move_alt);
sz_strlcpy(packet.sound_fight, u->sound_fight);
sz_strlcpy(packet.sound_fight_alt, u->sound_fight_alt);
sz_strlcpy(packet.graphic_str, u->graphic_str);
sz_strlcpy(packet.graphic_alt, u->graphic_alt);
packet.unit_class_id = uclass_number(utype_class(u));
packet.build_cost = u->build_cost;
packet.pop_cost = u->pop_cost;
packet.attack_strength = u->attack_strength;
packet.defense_strength = u->defense_strength;
packet.move_rate = u->move_rate;
packet.tech_requirement = u->require_advance
? advance_number(u->require_advance) : -1;
packet.impr_requirement = u->need_improvement
? improvement_number(u->need_improvement) : -1;
packet.gov_requirement = u->need_government
? government_number(u->need_government) : -1;
packet.vision_radius_sq = u->vision_radius_sq;
packet.transport_capacity = u->transport_capacity;
packet.hp = u->hp;
packet.firepower = u->firepower;
packet.obsoleted_by = u->obsoleted_by
? utype_number(u->obsoleted_by) : -1;
packet.converted_to = u->converted_to
? utype_number(u->converted_to) : -1;
packet.fuel = u->fuel;
packet.flags = u->flags;
packet.roles = u->roles;
packet.happy_cost = u->happy_cost;
output_type_iterate(o) {
packet.upkeep[o] = u->upkeep[o];
} output_type_iterate_end;
packet.paratroopers_range = u->paratroopers_range;
packet.paratroopers_mr_req = u->paratroopers_mr_req;
packet.paratroopers_mr_sub = u->paratroopers_mr_sub;
packet.bombard_rate = u->bombard_rate;
packet.city_size = u->city_size;
packet.cargo = u->cargo;
packet.targets = u->targets;
if (u->veteran == NULL) {
/* Use the default veteran system. */
packet.veteran_levels = 0;
} else {
/* Per unit veteran system definition. */
packet.veteran_levels = utype_veteran_levels(u);
for (i = 0; i < packet.veteran_levels; i++) {
const struct veteran_level *vlevel = utype_veteran_level(u, i);
sz_strlcpy(packet.veteran_name[i], untranslated_name(&vlevel->name));
packet.power_fact[i] = vlevel->power_fact;
packet.move_bonus[i] = vlevel->move_bonus;
}
}
PACKET_STRVEC_COMPUTE(packet.helptext, u->helptext);
lsend_packet_ruleset_unit(dest, &packet);
} unit_type_iterate_end;
}
/**************************************************************************
Send the specialists ruleset information (all individual specialist
types) to the specified connections.
**************************************************************************/
static void send_ruleset_specialists(struct conn_list *dest)
{
struct packet_ruleset_specialist packet;
specialist_type_iterate(spec_id) {
struct specialist *s = specialist_by_number(spec_id);
int j;
packet.id = spec_id;
sz_strlcpy(packet.plural_name, untranslated_name(&s->name));
sz_strlcpy(packet.rule_name, rule_name(&s->name));
sz_strlcpy(packet.short_name, untranslated_name(&s->abbreviation));
j = 0;
requirement_vector_iterate(&s->reqs, preq) {
packet.reqs[j++] = *preq;
} requirement_vector_iterate_end;
packet.reqs_count = j;
PACKET_STRVEC_COMPUTE(packet.helptext, s->helptext);
lsend_packet_ruleset_specialist(dest, &packet);
} specialist_type_iterate_end;
}
/**************************************************************************
Send the techs ruleset information (all individual advances) to the
specified connections.
**************************************************************************/
static void send_ruleset_techs(struct conn_list *dest)
{
struct packet_ruleset_tech packet;
advance_iterate(A_NONE, a) {
packet.id = advance_number(a);
sz_strlcpy(packet.name, untranslated_name(&a->name));
sz_strlcpy(packet.rule_name, rule_name(&a->name));
sz_strlcpy(packet.graphic_str, a->graphic_str);
sz_strlcpy(packet.graphic_alt, a->graphic_alt);
packet.req[AR_ONE] = a->require[AR_ONE]
? advance_number(a->require[AR_ONE]) : -1;
packet.req[AR_TWO] = a->require[AR_TWO]
? advance_number(a->require[AR_TWO]) : -1;
packet.root_req = a->require[AR_ROOT]
? advance_number(a->require[AR_ROOT]) : -1;
packet.flags = a->flags;
packet.preset_cost = a->preset_cost;
packet.num_reqs = a->num_reqs;
PACKET_STRVEC_COMPUTE(packet.helptext, a->helptext);
lsend_packet_ruleset_tech(dest, &packet);
} advance_iterate_end;
}
/**************************************************************************
Send the buildings ruleset information (all individual improvements and
wonders) to the specified connections.
**************************************************************************/
static void send_ruleset_buildings(struct conn_list *dest)
{
improvement_iterate(b) {
struct packet_ruleset_building packet;
int j;
packet.id = improvement_number(b);
packet.genus = b->genus;
sz_strlcpy(packet.name, untranslated_name(&b->name));
sz_strlcpy(packet.rule_name, rule_name(&b->name));
sz_strlcpy(packet.graphic_str, b->graphic_str);
sz_strlcpy(packet.graphic_alt, b->graphic_alt);
j = 0;
requirement_vector_iterate(&b->reqs, preq) {
packet.reqs[j++] = *preq;
} requirement_vector_iterate_end;
packet.reqs_count = j;
packet.obsolete_by = b->obsolete_by
? advance_number(b->obsolete_by) : -1;
packet.replaced_by = b->replaced_by
? improvement_number(b->replaced_by) : -1;
packet.build_cost = b->build_cost;
packet.upkeep = b->upkeep;
packet.sabotage = b->sabotage;
packet.flags = b->flags;
sz_strlcpy(packet.soundtag, b->soundtag);
sz_strlcpy(packet.soundtag_alt, b->soundtag_alt);
PACKET_STRVEC_COMPUTE(packet.helptext, b->helptext);
lsend_packet_ruleset_building(dest, &packet);
} improvement_iterate_end;
}
/**************************************************************************
Send the terrain ruleset information (terrain_control, and the individual
terrain types) to the specified connections.
**************************************************************************/
static void send_ruleset_terrain(struct conn_list *dest)
{
struct packet_ruleset_terrain packet;
lsend_packet_ruleset_terrain_control(dest, &terrain_control);
terrain_type_iterate(pterrain) {
struct resource **r;
packet.id = terrain_number(pterrain);
packet.native_to = pterrain->native_to;
sz_strlcpy(packet.name, untranslated_name(&pterrain->name));
sz_strlcpy(packet.rule_name, rule_name(&pterrain->name));
sz_strlcpy(packet.graphic_str, pterrain->graphic_str);
sz_strlcpy(packet.graphic_alt, pterrain->graphic_alt);
packet.movement_cost = pterrain->movement_cost;
packet.defense_bonus = pterrain->defense_bonus;
output_type_iterate(o) {
packet.output[o] = pterrain->output[o];
} output_type_iterate_end;
packet.num_resources = 0;
for (r = pterrain->resources; *r; r++) {
packet.resources[packet.num_resources++] = resource_number(*r);
}
packet.road_trade_incr = pterrain->road_trade_incr;
packet.road_time = pterrain->road_time;
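    /* A NULL result terrain is transmitted as terrain_count(). */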
packet.irrigation_result = (pterrain->irrigation_result
? terrain_number(pterrain->irrigation_result)
: terrain_count());
packet.irrigation_food_incr = pterrain->irrigation_food_incr;
packet.irrigation_time = pterrain->irrigation_time;
packet.mining_result = (pterrain->mining_result
? terrain_number(pterrain->mining_result)
: terrain_count());
packet.mining_shield_incr = pterrain->mining_shield_incr;
packet.mining_time = pterrain->mining_time;
packet.transform_result = (pterrain->transform_result
? terrain_number(pterrain->transform_result)
: terrain_count());
packet.transform_time = pterrain->transform_time;
packet.rail_time = pterrain->rail_time;
packet.clean_pollution_time = pterrain->clean_pollution_time;
packet.clean_fallout_time = pterrain->clean_fallout_time;
packet.flags = pterrain->flags;
packet.color_red = pterrain->rgb->r;
packet.color_green = pterrain->rgb->g;
packet.color_blue = pterrain->rgb->b;
PACKET_STRVEC_COMPUTE(packet.helptext, pterrain->helptext);
lsend_packet_ruleset_terrain(dest, &packet);
} terrain_type_iterate_end;
}
/****************************************************************************
Send the resource ruleset information to the specified connections.
****************************************************************************/
static void send_ruleset_resources(struct conn_list *dest)
{
struct packet_ruleset_resource packet;
resource_type_iterate (presource) {
packet.id = resource_number(presource);
sz_strlcpy(packet.name, untranslated_name(&presource->name));
sz_strlcpy(packet.rule_name, rule_name(&presource->name));
sz_strlcpy(packet.graphic_str, presource->graphic_str);
sz_strlcpy(packet.graphic_alt, presource->graphic_alt);
output_type_iterate(o) {
packet.output[o] = presource->output[o];
} output_type_iterate_end;
lsend_packet_ruleset_resource(dest, &packet);
} resource_type_iterate_end;
}
/**************************************************************************
Send the base ruleset information (all individual base types) to the
specified connections.
**************************************************************************/
static void send_ruleset_bases(struct conn_list *dest)
{
struct packet_ruleset_base packet;
base_type_iterate(b) {
int j;
packet.id = base_number(b);
sz_strlcpy(packet.name, untranslated_name(&b->name));
sz_strlcpy(packet.rule_name, rule_name(&b->name));
sz_strlcpy(packet.graphic_str, b->graphic_str);
sz_strlcpy(packet.graphic_alt, b->graphic_alt);
sz_strlcpy(packet.activity_gfx, b->activity_gfx);
packet.buildable = b->buildable;
packet.pillageable = b->pillageable;
j = 0;
requirement_vector_iterate(&b->reqs, preq) {
packet.reqs[j++] = *preq;
} requirement_vector_iterate_end;
packet.reqs_count = j;
packet.native_to = b->native_to;
packet.gui_type = b->gui_type;
packet.build_time = b->build_time;
packet.defense_bonus = b->defense_bonus;
packet.border_sq = b->border_sq;
packet.vision_main_sq = b->vision_main_sq;
packet.vision_invis_sq = b->vision_invis_sq;
packet.flags = b->flags;
packet.conflicts = b->conflicts;
PACKET_STRVEC_COMPUTE(packet.helptext, b->helptext);
lsend_packet_ruleset_base(dest, &packet);
} base_type_iterate_end;
}
/**************************************************************************
Send the government ruleset information to the specified connections.
One packet per government type, and for each type one per ruler title.
**************************************************************************/
static void send_ruleset_governments(struct conn_list *dest)
{
struct packet_ruleset_government gov;
struct packet_ruleset_government_ruler_title title;
int j;
governments_iterate(g) {
/* send one packet_government */
gov.id = government_number(g);
j = 0;
requirement_vector_iterate(&g->reqs, preq) {
gov.reqs[j++] = *preq;
} requirement_vector_iterate_end;
gov.reqs_count = j;
sz_strlcpy(gov.name, untranslated_name(&g->name));
sz_strlcpy(gov.rule_name, rule_name(&g->name));
sz_strlcpy(gov.graphic_str, g->graphic_str);
sz_strlcpy(gov.graphic_alt, g->graphic_alt);
PACKET_STRVEC_COMPUTE(gov.helptext, g->helptext);
lsend_packet_ruleset_government(dest, &gov);
/* Send one packet_government_ruler_title per ruler title. */
ruler_titles_iterate(government_ruler_titles(g), pruler_title) {
const struct nation_type *pnation = ruler_title_nation(pruler_title);
title.gov = government_number(g);
title.nation = (NULL != pnation ? nation_number(pnation) : -1);
sz_strlcpy(title.male_title,
ruler_title_male_untranslated_name(pruler_title));
sz_strlcpy(title.female_title,
ruler_title_female_untranslated_name(pruler_title));
lsend_packet_ruleset_government_ruler_title(dest, &title);
} ruler_titles_iterate_end;
} governments_iterate_end;
}
/**************************************************************************
Send the nations ruleset information (info on each nation) to the
specified connections.
**************************************************************************/
static void send_ruleset_nations(struct conn_list *dest)
{
struct packet_ruleset_nation packet;
struct packet_ruleset_nation_groups groups_packet;
int i;
groups_packet.ngroups = nation_group_count();
i = 0;
nation_groups_iterate(pgroup) {
sz_strlcpy(groups_packet.groups[i++],
nation_group_untranslated_name(pgroup));
} nation_groups_iterate_end;
lsend_packet_ruleset_nation_groups(dest, &groups_packet);
nations_iterate(n) {
packet.id = nation_number(n);
sz_strlcpy(packet.adjective, untranslated_name(&n->adjective));
sz_strlcpy(packet.rule_name, rule_name(&n->adjective));
sz_strlcpy(packet.noun_plural, untranslated_name(&n->noun_plural));
sz_strlcpy(packet.graphic_str, n->flag_graphic_str);
sz_strlcpy(packet.graphic_alt, n->flag_graphic_alt);
i = 0;
nation_leader_list_iterate(nation_leaders(n), pleader) {
sz_strlcpy(packet.leader_name[i], nation_leader_name(pleader));
packet.leader_is_male[i] = nation_leader_is_male(pleader);
i++;
} nation_leader_list_iterate_end;
packet.leader_count = i;
packet.city_style = n->city_style;
packet.is_playable = n->is_playable;
packet.is_available = n->is_available;
packet.barbarian_type = n->barb_type;
sz_strlcpy(packet.legend, n->legend);
i = 0;
nation_group_list_iterate(n->groups, pgroup) {
packet.groups[i++] = nation_group_number(pgroup);
} nation_group_list_iterate_end;
packet.ngroups = i;
packet.init_government_id = government_number(n->init_government);
fc_assert(ARRAY_SIZE(packet.init_techs) == ARRAY_SIZE(n->init_techs));
for (i = 0; i < MAX_NUM_TECH_LIST; i++) {
packet.init_techs[i] = n->init_techs[i];
}
fc_assert(ARRAY_SIZE(packet.init_units) == ARRAY_SIZE(n->init_units));
for (i = 0; i < MAX_NUM_UNIT_LIST; i++) {
const struct unit_type *t = n->init_units[i];
packet.init_units[i] = t ? utype_number(t) : U_LAST;
}
fc_assert(ARRAY_SIZE(packet.init_buildings)
== ARRAY_SIZE(n->init_buildings));
for (i = 0; i < MAX_NUM_BUILDING_LIST; i++) {
/* Impr_type_id to int */
packet.init_buildings[i] = n->init_buildings[i];
}
lsend_packet_ruleset_nation(dest, &packet);
} nations_iterate_end;
}
/**************************************************************************
Send the city-style ruleset information (each style) to the specified
connections.
**************************************************************************/
static void send_ruleset_cities(struct conn_list *dest)
{
struct packet_ruleset_city city_p;
int k, j;
for (k = 0; k < game.control.styles_count; k++) {
city_p.style_id = k;
city_p.replaced_by = city_styles[k].replaced_by;
j = 0;
requirement_vector_iterate(&city_styles[k].reqs, preq) {
city_p.reqs[j++] = *preq;
} requirement_vector_iterate_end;
city_p.reqs_count = j;
sz_strlcpy(city_p.name, untranslated_name(&city_styles[k].name));
sz_strlcpy(city_p.rule_name, rule_name(&city_styles[k].name));
sz_strlcpy(city_p.graphic, city_styles[k].graphic);
sz_strlcpy(city_p.graphic_alt, city_styles[k].graphic_alt);
sz_strlcpy(city_p.oceanic_graphic, city_styles[k].oceanic_graphic);
sz_strlcpy(city_p.oceanic_graphic_alt, city_styles[k].oceanic_graphic_alt);
sz_strlcpy(city_p.citizens_graphic, city_styles[k].citizens_graphic);
sz_strlcpy(city_p.citizens_graphic_alt,
city_styles[k].citizens_graphic_alt);
lsend_packet_ruleset_city(dest, &city_p);
}
}
/**************************************************************************
Send information in packet_ruleset_game (miscellaneous rules) to the
specified connections.
**************************************************************************/
static void send_ruleset_game(struct conn_list *dest)
{
struct packet_ruleset_game misc_p;
int i;
fc_assert_ret(game.veteran != NULL);
/* Per unit veteran system definition. */
misc_p.veteran_levels = game.veteran->levels;
for (i = 0; i < misc_p.veteran_levels; i++) {
const struct veteran_level *vlevel = game.veteran->definitions + i;
sz_strlcpy(misc_p.veteran_name[i], untranslated_name(&vlevel->name));
misc_p.power_fact[i] = vlevel->power_fact;
misc_p.move_bonus[i] = vlevel->move_bonus;
}
fc_assert(sizeof(misc_p.global_init_techs)
== sizeof(game.rgame.global_init_techs));
fc_assert(ARRAY_SIZE(misc_p.global_init_techs)
== ARRAY_SIZE(game.rgame.global_init_techs));
memcpy(misc_p.global_init_techs, game.rgame.global_init_techs,
sizeof(misc_p.global_init_techs));
fc_assert(ARRAY_SIZE(misc_p.global_init_buildings)
== ARRAY_SIZE(game.rgame.global_init_buildings));
for (i = 0; i < MAX_NUM_BUILDING_LIST; i++) {
/* Impr_type_id to int */
misc_p.global_init_buildings[i] =
game.rgame.global_init_buildings[i];
}
misc_p.default_specialist = DEFAULT_SPECIALIST;
fc_assert_ret(game.plr_bg_color != NULL);
misc_p.background_red = game.plr_bg_color->r;
misc_p.background_green = game.plr_bg_color->g;
misc_p.background_blue = game.plr_bg_color->b;
lsend_packet_ruleset_game(dest, &misc_p);
}
/**************************************************************************
Send all team names defined in the ruleset file(s) to the
specified connections.
**************************************************************************/
static void send_ruleset_team_names(struct conn_list *dest)
{
struct packet_team_name_info team_name_info_p;
team_slots_iterate(tslot) {
const char *name = team_slot_defined_name(tslot);
if (NULL == name) {
/* End of defined names. */
break;
}
team_name_info_p.team_id = team_slot_index(tslot);
sz_strlcpy(team_name_info_p.team_name, name);
lsend_packet_team_name_info(dest, &team_name_info_p);
} team_slots_iterate_end;
}
/**************************************************************************
Loads the ruleset currently given in game.rulesetdir.
This may be called more than once and it will free any stale data.
**************************************************************************/
void load_rulesets(void)
{
struct section_file *techfile, *unitfile, *buildfile, *govfile, *terrfile;
struct section_file *cityfile, *nationfile, *effectfile;
log_normal(_("Loading rulesets."));
game_ruleset_free();
/* Reset the list of available player colors. */
playercolor_free();
playercolor_init();
game_ruleset_init();
server.playable_nations = 0;
techfile = openload_ruleset_file("techs");
load_tech_names(techfile);
buildfile = openload_ruleset_file("buildings");
load_building_names(buildfile);
govfile = openload_ruleset_file("governments");
load_government_names(govfile);
unitfile = openload_ruleset_file("units");
load_unit_names(unitfile);
terrfile = openload_ruleset_file("terrain");
load_terrain_names(terrfile);
cityfile = openload_ruleset_file("cities");
load_citystyle_names(cityfile);
nationfile = openload_ruleset_file("nations");
load_nation_names(nationfile);
effectfile = openload_ruleset_file("effects");
load_ruleset_techs(techfile);
load_ruleset_cities(cityfile);
load_ruleset_governments(govfile);
load_ruleset_terrain(terrfile); /* terrain must precede nations and units */
load_ruleset_units(unitfile);
load_ruleset_buildings(buildfile);
load_ruleset_nations(nationfile);
load_ruleset_effects(effectfile);
load_ruleset_game();
/* Init nations we just loaded. */
init_available_nations();
sanity_check_ruleset_data();
precalc_tech_data();
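  /* The section name arrays saved while loading the ruleset names were
   * only needed during parsing; free them now that loading is complete. */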
if (base_sections) {
free(base_sections);
base_sections = NULL;
}
if (resource_sections) {
free(resource_sections);
resource_sections = NULL;
}
if (terrain_sections) {
free(terrain_sections);
terrain_sections = NULL;
}
script_server_free();
script_server_init();
openload_script_file("default");
openload_script_file("script");
/* Build advisors unit class cache corresponding to loaded rulesets */
adv_units_ruleset_init();
CALL_FUNC_EACH_AI(units_ruleset_init);
/* We may need to adjust the number of AI players
* if the number of available nations changed. */
if (game.info.aifill > server.playable_nations) {
log_normal(_("Reducing aifill because there "
"are not enough playable nations."));
game.info.aifill = server.playable_nations;
aifill(game.info.aifill);
}
}
/**************************************************************************
Reload the game settings saved in the ruleset file.
**************************************************************************/
void reload_rulesets_settings(void)
{
struct section_file *file;
file = openload_ruleset_file("game");
settings_ruleset(file, "settings");
secfile_destroy(file);
}
/**************************************************************************
Send all ruleset information to the specified connections.
**************************************************************************/
void send_rulesets(struct conn_list *dest)
{
conn_list_compression_freeze(dest);
/* ruleset_control also indicates to client that ruleset sending starts. */
send_ruleset_control(dest);
send_ruleset_game(dest);
send_ruleset_team_names(dest);
send_ruleset_techs(dest);
send_ruleset_governments(dest);
send_ruleset_unit_classes(dest);
send_ruleset_units(dest);
send_ruleset_specialists(dest);
send_ruleset_resources(dest);
send_ruleset_terrain(dest);
send_ruleset_bases(dest);
send_ruleset_buildings(dest);
send_ruleset_nations(dest);
send_ruleset_cities(dest);
send_ruleset_cache(dest);
/* Indicate client that all rulesets have now been sent. */
conn_list_iterate(dest, pconn) {
if (has_capability("rules_finished", pconn->capability)) {
lsend_packet_rulesets_ready(pconn->self);
}
} conn_list_iterate_end;
/* changed game settings will be send in
* connecthand.c:establish_new_connection() */
conn_list_compression_thaw(dest);
}
/**************************************************************************
Does nation have tech initially?
**************************************************************************/
static bool nation_has_initial_tech(struct nation_type *pnation,
struct advance *tech)
{
int i;
/* See if it's given as global init tech */
for (i = 0; i < MAX_NUM_TECH_LIST
&& game.rgame.global_init_techs[i] != A_LAST; i++) {
if (game.rgame.global_init_techs[i] == advance_number(tech)) {
return TRUE;
}
}
/* See if it's given as national init tech */
for (i = 0;
i < MAX_NUM_TECH_LIST && pnation->init_techs[i] != A_LAST;
i++) {
if (pnation->init_techs[i] == advance_number(tech)) {
return TRUE;
}
}
return FALSE;
}
/**************************************************************************
Helper function for sanity_check_req_list() and sanity_check_req_vec()
**************************************************************************/
static bool sanity_check_req_set(int reqs_of_type[], int local_reqs_of_type[],
struct requirement *preq,
int max_tiles, const char *list_for)
{
int rc;
fc_assert_ret_val(universals_n_is_valid(preq->source.kind), FALSE);
/* Add to counter */
reqs_of_type[preq->source.kind]++;
rc = reqs_of_type[preq->source.kind];
if (preq->range == REQ_RANGE_LOCAL) {
local_reqs_of_type[preq->source.kind]++;
switch (preq->source.kind) {
case VUT_TERRAINCLASS:
if (local_reqs_of_type[VUT_TERRAIN] > 0) {
log_error("%s: Requirement list has both local terrain and terrainclass requirement",
list_for);
return FALSE;
}
break;
case VUT_TERRAIN:
if (local_reqs_of_type[VUT_TERRAINCLASS] > 0) {
log_error("%s: Requirement list has both local terrain and terrainclass requirement",
list_for);
return FALSE;
}
break;
default:
break;
}
}
if (rc > 1) {
/* Multiple requirements of the same type */
switch (preq->source.kind) {
case VUT_GOVERNMENT:
case VUT_NATION:
case VUT_UTYPE:
case VUT_UCLASS:
case VUT_OTYPE:
case VUT_SPECIALIST:
case VUT_MINSIZE: /* Breaks nothing, but has no sense either */
case VUT_MINYEAR:
case VUT_AI_LEVEL:
case VUT_TERRAINALTER: /* Local range only */
case VUT_CITYTILE:
/* There can be only one requirement of these types (with current
* range limitations)
* Requirements might be identical, but we consider multiple
       * declarations an error anyway. */
log_error("%s: Requirement list has multiple %s requirements",
list_for, universal_type_rule_name(&preq->source));
return FALSE;
break;
case VUT_TERRAIN:
/* There can be only up to max_tiles requirements of these types */
if (max_tiles != -1 && rc > max_tiles) {
log_error("%s: Requirement list has more %s requirements than "
"can ever be fullfilled.", list_for,
universal_type_rule_name(&preq->source));
return FALSE;
}
break;
case VUT_TERRAINCLASS:
if (rc > 2 || (max_tiles != -1 && rc > max_tiles)) {
log_error("%s: Requirement list has more %s requirements than "
"can ever be fullfilled.", list_for,
universal_type_rule_name(&preq->source));
return FALSE;
}
break;
case VUT_SPECIAL:
case VUT_BASE:
/* Note that there can be more than 1 special or base / tile. */
case VUT_NONE:
case VUT_ADVANCE:
case VUT_IMPROVEMENT:
case VUT_UTFLAG:
case VUT_UCFLAG:
/* Can have multiple requirements of these types */
break;
case VUT_COUNT:
/* Should never be in requirement vector */
fc_assert(FALSE);
return FALSE;
break;
/* No default handling here, as we want compiler warning
* if new requirement type is added to enum and it's not handled
* here. */
}
}
return TRUE;
}
/**************************************************************************
Check if requirement list is free of conflicting requirements.
max_tiles is number of tiles that can provide requirement. Value -1
disables checking based on number of tiles.
Returns TRUE iff everything ok.
TODO: This is based on current hardcoded range limitations.
- There should be method of automatically determining these
limitations for each requirement type
- This function should check also problems caused by defining
range to less than hardcoded max for requirement type
**************************************************************************/
static bool sanity_check_req_list(const struct requirement_list *preqs,
int max_tiles,
const char *list_for)
{
int reqs_of_type[VUT_COUNT];
int local_reqs_of_type[VUT_COUNT];
/* Initialize requirement counters */
memset(reqs_of_type, 0, sizeof(reqs_of_type));
memset(local_reqs_of_type, 0, sizeof(local_reqs_of_type));
requirement_list_iterate(preqs, preq) {
if (!sanity_check_req_set(reqs_of_type, local_reqs_of_type, preq, max_tiles, list_for)) {
return FALSE;
}
} requirement_list_iterate_end;
return TRUE;
}
/**************************************************************************
Requirement vector version of requirement sanity checking. See
  requirement list version for comments.
**************************************************************************/
static bool sanity_check_req_vec(const struct requirement_vector *preqs,
int max_tiles,
const char *list_for)
{
int reqs_of_type[VUT_COUNT];
int local_reqs_of_type[VUT_COUNT];
/* Initialize requirement counters */
memset(reqs_of_type, 0, sizeof(reqs_of_type));
memset(local_reqs_of_type, 0, sizeof(local_reqs_of_type));
requirement_vector_iterate(preqs, preq) {
if (!sanity_check_req_set(reqs_of_type, local_reqs_of_type, preq, max_tiles, list_for)) {
return FALSE;
}
} requirement_vector_iterate_end;
return TRUE;
}
/**************************************************************************
Check that requirement list and negated requirements list do not have
  conflicting requirements.
Returns TRUE iff everything ok.
**************************************************************************/
static bool sanity_check_req_nreq_list(const struct requirement_list *preqs,
const struct requirement_list *pnreqs,
int one_tile,
const char *list_for)
{
/* Check internal sanity of requirement list */
if (!sanity_check_req_list(preqs, one_tile, list_for)) {
return FALSE;
}
/* There is no pnreqs in all cases */
if (pnreqs != NULL) {
/* Check sanity between reqs and nreqs */
requirement_list_iterate(preqs, preq) {
requirement_list_iterate(pnreqs, pnreq) {
if (are_requirements_equal(preq, pnreq)) {
log_error("%s: Identical %s requirement in requirements and "
"negated requirements.", list_for,
universal_type_rule_name(&preq->source));
return FALSE;
}
} requirement_list_iterate_end;
} requirement_list_iterate_end;
}
return TRUE;
}
/**************************************************************************
Sanity check callback for iterating effects cache.
**************************************************************************/
static bool effect_list_sanity_cb(const struct effect *peffect)
{
int one_tile = -1; /* TODO: Determine correct value from effect.
* -1 disables checking */
return sanity_check_req_nreq_list(peffect->reqs, peffect->nreqs, one_tile,
effect_type_name(peffect->type));
}
/**************************************************************************
Some more sanity checking once all rulesets are loaded. These check
for some cross-referencing which was impossible to do while only one
  part was loaded in load_ruleset_xxx().
Returns TRUE iff everything ok.
**************************************************************************/
static bool sanity_check_ruleset_data(void)
{
int num_utypes;
int i;
bool ok = TRUE; /* Store failures to variable instead of returning
* immediately so all errors get printed, not just first
* one. */
/* Check that all players can have their initial techs */
nations_iterate(pnation) {
int i;
/* Check global initial techs */
for (i = 0; i < MAX_NUM_TECH_LIST
&& game.rgame.global_init_techs[i] != A_LAST; i++) {
Tech_type_id tech = game.rgame.global_init_techs[i];
struct advance *a = valid_advance_by_number(tech);
if (!a) {
ruleset_error(LOG_FATAL,
"Tech %s does not exist, but is initial "
"tech for everyone.",
advance_rule_name(advance_by_number(tech)));
}
if (advance_by_number(A_NONE) != a->require[AR_ROOT]
&& !nation_has_initial_tech(pnation, a->require[AR_ROOT])) {
/* Nation has no root_req for tech */
ruleset_error(LOG_FATAL,
"Tech %s is initial for everyone, but %s has "
"no root_req for it.",
advance_rule_name(a),
nation_rule_name(pnation));
}
}
/* Check national initial techs */
for (i = 0;
i < MAX_NUM_TECH_LIST && pnation->init_techs[i] != A_LAST;
i++) {
Tech_type_id tech = pnation->init_techs[i];
struct advance *a = valid_advance_by_number(tech);
if (!a) {
ruleset_error(LOG_FATAL,
"Tech %s does not exist, but is tech for %s.",
advance_rule_name(advance_by_number(tech)),
nation_rule_name(pnation));
ok = FALSE;
}
if (advance_by_number(A_NONE) != a->require[AR_ROOT]
&& !nation_has_initial_tech(pnation, a->require[AR_ROOT])) {
/* Nation has no root_req for tech */
ruleset_error(LOG_FATAL,
"Tech %s is initial for %s, but they have "
"no root_req for it.",
advance_rule_name(a),
nation_rule_name(pnation));
ok = FALSE;
}
}
} nations_iterate_end;
/* Check against unit upgrade loops */
num_utypes = game.control.num_unit_types;
unit_type_iterate(putype) {
int chain_length = 0;
struct unit_type *upgraded = putype;
while(upgraded != NULL) {
upgraded = upgraded->obsoleted_by;
chain_length++;
if (chain_length > num_utypes) {
ruleset_error(LOG_FATAL,
"There seems to be obsoleted_by loop in update "
"chain that starts from %s", utype_rule_name(putype));
ok = FALSE;
}
}
} unit_type_iterate_end;
/* Check requirement sets against conflicting requirements.
* Effects use requirement lists */
if (!iterate_effect_cache(effect_list_sanity_cb)) {
ruleset_error(LOG_FATAL, "Effects have conflicting requirements!");
ok = FALSE;
}
/* Others use requirement vectors
* Buildings */
improvement_iterate(pimprove) {
if (!sanity_check_req_vec(&pimprove->reqs, -1,
improvement_rule_name(pimprove))) {
ruleset_error(LOG_FATAL, "Buildings have conflicting requirements!");
ok = FALSE;
}
} improvement_iterate_end;
/* Governments */
governments_iterate(pgov) {
if (!sanity_check_req_vec(&pgov->reqs, -1,
government_rule_name(pgov))) {
ruleset_error(LOG_FATAL, "Governments have conflicting requirements!");
ok = FALSE;
}
} governments_iterate_end;
/* Specialists */
specialist_type_iterate(sp) {
struct specialist *psp = specialist_by_number(sp);
if (!sanity_check_req_vec(&psp->reqs, -1,
specialist_rule_name(psp))) {
ruleset_error(LOG_FATAL, "Specialists have conflicting requirements!");
ok = FALSE;
}
} specialist_type_iterate_end;
/* Bases */
base_type_iterate(pbase) {
if (!sanity_check_req_vec(&pbase->reqs, -1,
base_rule_name(pbase))) {
ruleset_error(LOG_FATAL, "Bases have conflicting requirements!");
ok = FALSE;
}
  } base_type_iterate_end;
/* City styles */
for (i = 0; i < game.control.styles_count; i++) {
if (!sanity_check_req_vec(&city_styles[i].reqs, -1,
city_style_rule_name(i))) {
ruleset_error(LOG_FATAL, "City styles have conflicting requirements!");
ok = FALSE;
}
}
terrain_type_iterate(pterr) {
unit_class_iterate(uc) {
if (BV_ISSET(pterr->native_to, uclass_index(uc))) {
if (is_ocean(pterr) && uc->move_type == UMT_LAND) {
ruleset_error(LOG_FATAL,
"Oceanic %s is native to land units.",
terrain_rule_name(pterr));
ok = FALSE;
} else if (!is_ocean(pterr) && uc->move_type == UMT_SEA) {
ruleset_error(LOG_FATAL,
"Non-oceanic %s is native to sea units.",
terrain_rule_name(pterr));
ok = FALSE;
}
}
} unit_class_iterate_end;
} terrain_type_iterate_end;
return ok;
}
| jheusala/freeciv | server/ruleset.c | C | gpl-2.0 | 174,636 |
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <asm/mach-types.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_memtypes.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/gpiomux.h>
#include <linux/ion.h>
#include <mach/ion.h>
#include "devices.h"
#ifndef CONFIG_MACH_LGE
#endif
#include <linux/fb.h>
#include "../../../../drivers/video/msm/msm_fb.h"
#include "../../../../drivers/video/msm/msm_fb_def.h"
#include "../../../../drivers/video/msm/mipi_dsi.h"
#include <mach/board_lge.h>
#include CONFIG_BOARD_HEADER_FILE
#ifdef CONFIG_LGE_KCAL
#ifdef CONFIG_LGE_QC_LCDC_LUT
extern int set_qlut_kcal_values(int kcal_r, int kcal_g, int kcal_b);
extern int refresh_qlut_display(void);
#else
#error only kcal by Qualcomm LUT is supported now!!!
#endif
#endif
#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
#define MSM_FB_PRIM_BUF_SIZE (LCD_RESOLUTION_X * LCD_RESOLUTION_Y * 4 * 3)
/* 4(bpp) x 3(pages) */
#else
#define MSM_FB_PRIM_BUF_SIZE (LCD_RESOLUTION_X * LCD_RESOLUTION_Y * 4 * 2)
/* 4(bpp) x 2(pages) */
#endif
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
#define MSM_FB_EXT_BUF_SIZE (1920 * 1088 * 2 * 1) /* 2 bpp x 1 page */
#elif defined(CONFIG_FB_MSM_TVOUT)
#define MSM_FB_EXT_BUF_SIZE (720 * 576 * 2 * 2) /* 2 bpp x 2 pages */
#else
#define MSM_FB_EXT_BUF_SIZE 0
#endif
/* Note: must be multiple of 4096 */
#define MSM_FB_SIZE roundup(MSM_FB_PRIM_BUF_SIZE + MSM_FB_EXT_BUF_SIZE, 4096)
#ifdef CONFIG_FB_MSM_OVERLAY0_WRITEBACK
#define MSM_FB_OVERLAY0_WRITEBACK_SIZE roundup((LCD_RESOLUTION_X * LCD_RESOLUTION_Y * 3 * 2), 4096)
#else
#define MSM_FB_OVERLAY0_WRITEBACK_SIZE (0)
#endif /* CONFIG_FB_MSM_OVERLAY0_WRITEBACK */
#ifdef CONFIG_FB_MSM_OVERLAY1_WRITEBACK
#define MSM_FB_OVERLAY1_WRITEBACK_SIZE roundup((1920 * 1088 * 3 * 2), 4096)
#else
#define MSM_FB_OVERLAY1_WRITEBACK_SIZE (0)
#endif /* CONFIG_FB_MSM_OVERLAY1_WRITEBACK */
#define MDP_VSYNC_GPIO 0
#define MIPI_CMD_NOVATEK_QHD_PANEL_NAME "mipi_cmd_novatek_qhd"
#define MIPI_VIDEO_NOVATEK_QHD_PANEL_NAME "mipi_video_novatek_qhd"
#define MIPI_VIDEO_TOSHIBA_WSVGA_PANEL_NAME "mipi_video_toshiba_wsvga"
#define MIPI_VIDEO_TOSHIBA_WUXGA_PANEL_NAME "mipi_video_toshiba_wuxga"
#define MIPI_VIDEO_CHIMEI_WXGA_PANEL_NAME "mipi_video_chimei_wxga"
#define MIPI_VIDEO_SIMULATOR_VGA_PANEL_NAME "mipi_video_simulator_vga"
#define MIPI_CMD_RENESAS_FWVGA_PANEL_NAME "mipi_cmd_renesas_fwvga"
#define HDMI_PANEL_NAME "hdmi_msm"
#define TVOUT_PANEL_NAME "tvout_msm"
#ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY
unsigned char hdmi_is_primary = 1;
#else
unsigned char hdmi_is_primary;
#endif
#define TUNING_BUFSIZE 4096
#define TUNING_REGSIZE 40
#define TUNING_REGNUM 10
#define LCD_GAMMA 0
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
#define CABC_POWERON_OFFSET 4 /* offset from lcd display on cmds */
#define CABC_OFF 0
#define CABC_ON 1
#define CABC_10 1
#define CABC_20 2
#define CABC_30 3
#define CABC_40 4
#define CABC_50 5
#define CABC_DEFAULT CABC_10
#if defined (CONFIG_LGE_BACKLIGHT_CABC_DEBUG)
static int lgit_cabc_index = CABC_DEFAULT;
#endif /* CONFIG_LGE_BACKLIGHT_CABC_DEBUG */
#endif /* CONFIG_LGE_BACKLIGHT_CABC */
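/* Framebuffer DMA region; the start/end addresses are filled in at boot
 * time by msm8960_allocate_fb_region(). */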
static struct resource msm_fb_resources[] = {
{
.flags = IORESOURCE_DMA,
}
};
#ifndef CONFIG_MACH_LGE
#ifndef CONFIG_FB_MSM_MIPI_PANEL_DETECT
static void set_mdp_clocks_for_wuxga(void);
#endif
#endif
static int msm_fb_detect_panel(const char *name)
{
return 0;
}
static struct msm_fb_platform_data msm_fb_pdata = {
.detect_client = msm_fb_detect_panel,
};
static struct platform_device msm_fb_device = {
.name = "msm_fb",
.id = 0,
.num_resources = ARRAY_SIZE(msm_fb_resources),
.resource = msm_fb_resources,
.dev.platform_data = &msm_fb_pdata,
};
#ifndef CONFIG_MACH_LGE
static void mipi_dsi_panel_pwm_cfg(void)
{
int rc;
static int mipi_dsi_panel_gpio_configured;
static struct pm_gpio pwm_enable = {
.direction = PM_GPIO_DIR_OUT,
.output_buffer = PM_GPIO_OUT_BUF_CMOS,
.output_value = 1,
.pull = PM_GPIO_PULL_NO,
.vin_sel = PM_GPIO_VIN_VPH,
.out_strength = PM_GPIO_STRENGTH_HIGH,
.function = PM_GPIO_FUNC_NORMAL,
.inv_int_pol = 0,
.disable_pin = 0,
};
static struct pm_gpio pwm_mode = {
.direction = PM_GPIO_DIR_OUT,
.output_buffer = PM_GPIO_OUT_BUF_CMOS,
.output_value = 0,
.pull = PM_GPIO_PULL_NO,
.vin_sel = PM_GPIO_VIN_S4,
.out_strength = PM_GPIO_STRENGTH_HIGH,
.function = PM_GPIO_FUNC_2,
.inv_int_pol = 0,
.disable_pin = 0,
};
if (mipi_dsi_panel_gpio_configured == 0) {
/* pm8xxx: gpio-21, Backlight Enable */
rc = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(21),
&pwm_enable);
if (rc != 0)
pr_err("%s: pwm_enabled failed\n", __func__);
/* pm8xxx: gpio-24, Bl: Off, PWM mode */
rc = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(24),
&pwm_mode);
if (rc != 0)
pr_err("%s: pwm_mode failed\n", __func__);
mipi_dsi_panel_gpio_configured++;
}
}
#endif
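/* Set once the DSI regulators and the reset/enable GPIOs have been
 * acquired in mipi_dsi_panel_power(). */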
static bool dsi_power_on;
/* LGE_CHANGE
 * LG Display 4.0" WVGA for l_dcm
* kyunghoo.ryu@lge.com
*/
static int mipi_dsi_panel_power(int on)
{
static struct regulator *reg_l8, *reg_l2, *reg_lvs6;
static int gpio43 = PM8921_GPIO_PM_TO_SYS(43);
int rc;
pr_debug("%s: state : %d\n", __func__, on);
if (!dsi_power_on) {
reg_l8 = regulator_get(&msm_mipi_dsi1_device.dev,
"dsi_vdc");
if (IS_ERR(reg_l8)) {
pr_err("could not get 8921_l8, rc = %ld\n",
PTR_ERR(reg_l8));
return -ENODEV;
}
reg_lvs6 = regulator_get(&msm_mipi_dsi1_device.dev,
"8921_lvs6");
if (IS_ERR(reg_lvs6)) {
pr_err("could not get 8921_lvs6, rc = %ld\n",
PTR_ERR(reg_lvs6));
return -ENODEV;
}
reg_l2 = regulator_get(&msm_mipi_dsi1_device.dev,
"dsi_vdda");
if (IS_ERR(reg_l2)) {
pr_err("could not get 8921_l2, rc = %ld\n",
PTR_ERR(reg_l2));
return -ENODEV;
}
rc = regulator_set_voltage(reg_l8, 2800000, 2800000);
if (rc) {
pr_err("set_voltage l8 failed, rc=%d\n", rc);
return -EINVAL;
}
rc = regulator_set_voltage(reg_l2, 1200000, 1200000);
if (rc) {
pr_err("set_voltage l2 failed, rc=%d\n", rc);
return -EINVAL;
}
/* VREG_2P8_LCD_VCI enable - kyunghoo.ryu@lge.com */
rc = gpio_request(LCD_VCI_EN_GPIO, "LCD_VCI_EN_GPIO");
if (rc) {
pr_err("'%s'(%d) gpio_request failed, rc=%d\n",
"LCD_VCI_EN_GPIO", LCD_VCI_EN_GPIO, rc);
}
gpio_tlmm_config(GPIO_CFG(LCD_VCI_EN_GPIO, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_ENABLE);
rc = gpio_request(gpio43, "disp_rst_n");
if (rc) {
pr_err("request gpio 43 failed, rc=%d\n", rc);
return -ENODEV;
}
dsi_power_on = true;
}
if (on) {
rc = regulator_set_optimum_mode(reg_l8, 100000);
if (rc < 0) {
pr_err("set_optimum_mode l8 failed, rc=%d\n", rc);
return -EINVAL;
}
rc = regulator_set_optimum_mode(reg_l2, 100000);
if (rc < 0) {
pr_err("set_optimum_mode l2 failed, rc=%d\n", rc);
return -EINVAL;
}
rc = regulator_enable(reg_l8);
if (rc) {
pr_err("enable l8 failed, rc=%d\n", rc);
return -ENODEV;
}
rc = regulator_enable(reg_lvs6);
if (rc) {
pr_err("enable lvs6 failed, rc=%d\n", rc);
return -ENODEV;
}
rc = gpio_direction_output(LCD_VCI_EN_GPIO, 1);
mdelay(1);
rc = regulator_enable(reg_l2);
if (rc) {
pr_err("enable l2 failed, rc=%d\n", rc);
return -ENODEV;
}
} else {
rc = regulator_disable(reg_l8);
if (rc) {
pr_err("disable reg_l8 failed, rc=%d\n", rc);
return -ENODEV;
}
rc = regulator_disable(reg_lvs6);
if (rc) {
pr_err("disable reg_lvs6 failed, rc=%d\n", rc);
return -ENODEV;
}
rc = regulator_disable(reg_l2);
if (rc) {
pr_err("enable l2 failed, rc=%d\n", rc);
return -ENODEV;
}
/* LCD Reset LOW */
gpio_direction_output(gpio43, 0);
/* LCD VCI EN LOW */
rc = gpio_direction_output(LCD_VCI_EN_GPIO, 0);
rc = regulator_set_optimum_mode(reg_l8, 100);
if (rc < 0) {
pr_err("set_optimum_mode l8 failed, rc=%d\n", rc);
return -EINVAL;
}
rc = regulator_set_optimum_mode(reg_l2, 100);
if (rc < 0) {
pr_err("set_optimum_mode l2 failed, rc=%d\n", rc);
return -EINVAL;
}
}
return 0;
}
static struct mipi_dsi_platform_data mipi_dsi_pdata = {
.vsync_gpio = MDP_VSYNC_GPIO,
.dsi_power_save = mipi_dsi_panel_power,
};
#ifdef CONFIG_MSM_BUS_SCALING
static struct msm_bus_vectors rotator_init_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors rotator_ui_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = (1024 * 600 * 4 * 2 * 60),
.ib = (1024 * 600 * 4 * 2 * 60 * 1.5),
},
};
static struct msm_bus_vectors rotator_vga_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = (640 * 480 * 2 * 2 * 30),
.ib = (640 * 480 * 2 * 2 * 30 * 1.5),
},
};
static struct msm_bus_vectors rotator_720p_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = (1280 * 736 * 2 * 2 * 30),
.ib = (1280 * 736 * 2 * 2 * 30 * 1.5),
},
};
static struct msm_bus_vectors rotator_1080p_vectors[] = {
{
.src = MSM_BUS_MASTER_ROTATOR,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = (1920 * 1088 * 2 * 2 * 30),
.ib = (1920 * 1088 * 2 * 2 * 30 * 1.5),
},
};
static struct msm_bus_paths rotator_bus_scale_usecases[] = {
{
ARRAY_SIZE(rotator_init_vectors),
rotator_init_vectors,
},
{
ARRAY_SIZE(rotator_ui_vectors),
rotator_ui_vectors,
},
{
ARRAY_SIZE(rotator_vga_vectors),
rotator_vga_vectors,
},
{
ARRAY_SIZE(rotator_720p_vectors),
rotator_720p_vectors,
},
{
ARRAY_SIZE(rotator_1080p_vectors),
rotator_1080p_vectors,
},
};
struct msm_bus_scale_pdata rotator_bus_scale_pdata = {
rotator_bus_scale_usecases,
ARRAY_SIZE(rotator_bus_scale_usecases),
.name = "rotator",
};
static struct msm_bus_vectors mdp_init_vectors[] = {
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
#ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY
static struct msm_bus_vectors hdmi_as_primary_vectors[] = {
/* If HDMI is used as primary */
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 2000000000,
.ib = 2000000000,
},
};
static struct msm_bus_paths mdp_bus_scale_usecases[] = {
{
ARRAY_SIZE(mdp_init_vectors),
mdp_init_vectors,
},
{
ARRAY_SIZE(hdmi_as_primary_vectors),
hdmi_as_primary_vectors,
},
{
ARRAY_SIZE(hdmi_as_primary_vectors),
hdmi_as_primary_vectors,
},
{
ARRAY_SIZE(hdmi_as_primary_vectors),
hdmi_as_primary_vectors,
},
{
ARRAY_SIZE(hdmi_as_primary_vectors),
hdmi_as_primary_vectors,
},
{
ARRAY_SIZE(hdmi_as_primary_vectors),
hdmi_as_primary_vectors,
},
};
#else
static struct msm_bus_vectors mdp_ui_vectors[] = {
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 216000000 * 2,
.ib = 270000000 * 2,
},
};
static struct msm_bus_vectors mdp_vga_vectors[] = {
/* VGA and less video */
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 216000000 * 2,
.ib = 270000000 * 2,
},
};
static struct msm_bus_vectors mdp_720p_vectors[] = {
/* 720p and less video */
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 230400000 * 2,
.ib = 288000000 * 2,
},
};
static struct msm_bus_vectors mdp_1080p_vectors[] = {
/* 1080p and less video */
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 334080000 * 2,
.ib = 417600000 * 2,
},
};
static struct msm_bus_paths mdp_bus_scale_usecases[] = {
{
ARRAY_SIZE(mdp_init_vectors),
mdp_init_vectors,
},
{
ARRAY_SIZE(mdp_ui_vectors),
mdp_ui_vectors,
},
{
ARRAY_SIZE(mdp_ui_vectors),
mdp_ui_vectors,
},
{
ARRAY_SIZE(mdp_vga_vectors),
mdp_vga_vectors,
},
{
ARRAY_SIZE(mdp_720p_vectors),
mdp_720p_vectors,
},
{
ARRAY_SIZE(mdp_1080p_vectors),
mdp_1080p_vectors,
},
};
#endif
static struct msm_bus_scale_pdata mdp_bus_scale_pdata = {
mdp_bus_scale_usecases,
ARRAY_SIZE(mdp_bus_scale_usecases),
.name = "mdp",
};
#endif
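/* Selectable MDP core clock rates (Hz) handed to the MDP driver. */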
static int mdp_core_clk_rate_table[] = {
128000000,/*85330000,*/
128000000,/*85330000,*/
160000000,
200000000,
};
struct msm_fb_info_st {
unsigned int width_mm;
unsigned int height_mm;
};
static struct msm_fb_info_st msm_fb_info_data = {
.width_mm = MSM_FB_WIDTH_MM,
.height_mm = MSM_FB_HEIGHT_MM
};
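/* Framebuffer notifier: on FB_EVENT_FB_REGISTERED, report the physical
 * panel dimensions (in mm) to the framebuffer core. */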
static int msm_fb_event_notify(struct notifier_block *self,
unsigned long action, void *data)
{
struct fb_event *event = data;
struct fb_info *info = event->info;
struct msm_fb_info_st *fb_info_mm = &msm_fb_info_data;
int ret = 0;
switch (action) {
case FB_EVENT_FB_REGISTERED:
info->var.width = fb_info_mm->width_mm;
info->var.height = fb_info_mm->height_mm;
break;
}
return ret;
}
static struct notifier_block msm_fb_event_notifier = {
.notifier_call = msm_fb_event_notify,
};
static struct msm_panel_common_pdata mdp_pdata = {
.gpio = MDP_VSYNC_GPIO,
.mdp_core_clk_rate = 128000000, /*85330000,*/
.mdp_core_clk_table = mdp_core_clk_rate_table,
.num_mdp_clk = ARRAY_SIZE(mdp_core_clk_rate_table),
#ifdef CONFIG_MSM_BUS_SCALING
.mdp_bus_scale_table = &mdp_bus_scale_pdata,
#endif
.mdp_rev = MDP_REV_42,
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
.mem_hid = ION_CP_MM_HEAP_ID,
#else
.mem_hid = MEMTYPE_EBI1,
#endif
.cont_splash_enabled = 0x00,
};
#ifndef CONFIG_MACH_LGE
#ifndef CONFIG_FB_MSM_MIPI_PANEL_DETECT
/**
* Set MDP clocks to high frequency to avoid DSI underflow
* when using high resolution 1200x1920 WUXGA panels
*/
static void set_mdp_clocks_for_wuxga(void)
{
int i;
mdp_ui_vectors[0].ab = 2000000000;
mdp_ui_vectors[0].ib = 2000000000;
mdp_vga_vectors[0].ab = 2000000000;
mdp_vga_vectors[0].ib = 2000000000;
mdp_720p_vectors[0].ab = 2000000000;
mdp_720p_vectors[0].ib = 2000000000;
mdp_1080p_vectors[0].ab = 2000000000;
mdp_1080p_vectors[0].ib = 2000000000;
mdp_pdata.mdp_core_clk_rate = 200000000;
for (i = 0; i < ARRAY_SIZE(mdp_core_clk_rate_table); i++)
mdp_core_clk_rate_table[i] = 200000000;
}
#endif
#endif
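/* Record the MDP overlay writeback buffer sizes and, in the PMEM case,
 * add them to the memory reservation table. */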
void __init msm8960_mdp_writeback(struct memtype_reserve* reserve_table)
{
mdp_pdata.ov0_wb_size = MSM_FB_OVERLAY0_WRITEBACK_SIZE;
mdp_pdata.ov1_wb_size = MSM_FB_OVERLAY1_WRITEBACK_SIZE;
#if defined(CONFIG_ANDROID_PMEM) && !defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
reserve_table[mdp_pdata.mem_hid].size +=
mdp_pdata.ov0_wb_size;
reserve_table[mdp_pdata.mem_hid].size +=
mdp_pdata.ov1_wb_size;
#endif
}
/* LGE_CHANGE
*
* LM3533TMX BL driver for l_dcm
* 2011-11-23 kyunghoo.ryu@lge.com
*/
#ifdef CONFIG_LGE_BACKLIGHT_LM3533
extern void lm3533_lcd_backlight_set_level(int level);
#ifdef CONFIG_FB_MSM_MIPI_DSI_LGIT
static int mipi_lgit_backlight_level(int level, int max, int min)
{
lm3533_lcd_backlight_set_level(level);
return 0;
}
#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
/* LG Display 4.0" WVGA for l_dcm (CMD Mode)
* Rotate Display output by l_dcm h/w implementation
* 2011-11-24 Kyunghoo.ryu@lge.com
*/
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
static char video_switch[] = {0x01, 0x47};
#endif
/* LG-4572B only for Rev.A and Rev.B */
static char hrx_to_old [ 2] = {0x03, 0x00};
static char inversion_off_old [ 2] = {0x20, 0x00};
static char tear_on_old [ 2] = {0x35, 0x00};
static char set_address_mode_old [ 2] = {0x36, 0x02}; /* Flip Horizontal Only (cause Tearing problem) - Kyunghoo.ryu@lge.com */
static char if_pixel_format_old [ 2] = {0x3A, 0x77};
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
static char rgb_interface_setting_old [ ] = {0xB1, 0x06, 0x43, 0x0A};
#endif
static char page_address_set_old [ 5] = {0x2B, 0x00, 0x00, 0x03, 0x1F};
static char panel_char_setting_old [ 3] = {0xB2, 0x00, 0xC8};
static char panel_drive_setting_old [ 2] = {0xB3, 0x00};
static char display_mode_ctrl_old [ 2] = {0xB4, 0x04};
static char display_ctrl1_old [ 6] = {0xB5, 0x42, 0x10, 0x10, 0x00, 0x20};
static char display_ctrl2_old [ 7] = {0xB6, 0x0B, 0x0F, 0x02, 0x40, 0x10, 0xE8};
#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL)
static char display_ctrl3_old [ 6] = {0xB7, 0x48, 0x06, 0x2E, 0x00, 0x00};
#endif
static char osc_setting_old [ 3] = {0xC0, 0x01, 0x15};
static char power_ctrl3_old [ 6] = {0xC3, 0x07, 0x03, 0x04, 0x04, 0x04};
static char power_ctrl4_old [ 7] = {0xC4, 0x12, 0x24, 0x18, 0x18, 0x05, 0x49};
static char power_ctrl5_old [ 2] = {0xC5, 0x69};
static char power_ctrl6_old [ 3] = {0xC6, 0x41, 0x63};
static char exit_sleep_old [ 2] = {0x11, 0x00};
static char display_on_old [ 2] = {0x29, 0x00};
static char enter_sleep_old [ 2] = {0x10, 0x00};
static char display_off_old [ 2] = {0x28, 0x00};
static char deep_standby_old [ 2] = {0xC1, 0x01};
/* LGE_CHANGE_S LG-4573B H/W Rev.C or upper revision, jamin.koo@lge.com, 2011.02.27 */
static char hrx_to [ 2] = {0x03, 0x00};
static char inversion_off [ 1] = {0x20};
static char set_address_mode [ 2] = {0x36, 0x02}; /* Flip Horizontal Only (cause Tearing problem) - Kyunghoo.ryu@lge.com */
static char if_pixel_format [ 2] = {0x3A, 0x70};
/* LGE_CHANGE_S, Add CABC Code, jamin.koo@lge.com, 2012.03.30 */
#ifdef CONFIG_LGE_BACKLIGHT_CABC
static char cabc_51 [ 2] = {0x51,0xE6}; /* LCD CABC CODE, Write Display Brightness */
static char cabc_53 [ 2] = {0x53,0x24}; /* LCD CABC CODE, Write Control Display */
static char cabc_55 [ 2] = {0x55,0x01}; /* LCD CABC CODE, Write Content Adaptive Brightness Control */
static char cabc_5e [ 2] = {0x5E,0x33}; /* LCD CABC CODE, Write CABC Minimum Brightness */
#ifdef CONFIG_LGE_BACKLIGHT_CABC_DEBUG
/* Write Display Brightness */
static char config_cabc_51[6][2] = {
{0x51, 0x00}, /* off */
{0x51, 0xE6}, /* 10%, 230 */
{0x51, 0xCC}, /* 20%, 204 */
{0x51, 0xB3}, /* 30%, 179 */
{0x51, 0x99}, /* 40%, 153 */
{0x51, 0x80} /* 50%, 128 */
};
/* Write Control Display */
static char config_cabc_53[2][2] = {
{0x53, 0x00}, /* off */
{0x53, 0x24} /* on */
};
/* Write Content Adaptive Brightness Control */
static char config_cabc_55[2][2] = {
{0x55, 0x00}, /* off */
{0x55, 0x01} /* on */
};
/* Write CABC Minimum Brightness */
static char config_cabc_5e[6][2] = {
{0x5E, 0x00}, /* off */
{0x5E, 0x33}, /* 10% */
{0x5E, 0x33}, /* 20% */
{0x5E, 0x33}, /* 30% */
{0x5E, 0x33}, /* 40% */
{0x5E, 0x33} /* 50% */
};
#endif /* CONFIG_LGE_BACKLIGHT_CABC_DEBUG */
#endif /* CONFIG_LGE_BACKLIGHT_CABC */
/* LGE_CHANGE_E, Add CABC Code, jamin.koo@lge.com, 2012.03.30 */
static char rgb_interface_setting [ 4] = {0xB1, 0x06, 0x43, 0x0A};
static char panel_char_setting [ 3] = {0xB2, 0x00, 0xC8};
static char panel_drive_setting [ 2] = {0xB3, 0x00};
static char display_mode_ctrl [ 2] = {0xB4, 0x04};
static char display_ctrl1 [ 6] = {0xB5, 0x40, 0x18, 0x02, 0x00, 0x01};
static char display_ctrl2 [ 7] = {0xB6, 0x0B, 0x0F, 0x02, 0x40, 0x10, 0xE8};
static char osc_setting [ 3] = {0xC0, 0x01, 0x18};
static char power_ctrl3 [ 6] = {0xC3, 0x07, 0x0A, 0x0A, 0x0A, 0x02};
static char power_ctrl4 [ 7] = {0xC4, 0x12, 0x24, 0x18, 0x18, 0x04, 0x49};
static char power_ctrl5 [ 2] = {0xC5, 0x6B};
static char power_ctrl6 [ 4] = {0xC6, 0x41, 0x63, 0x03};
static char p_gamma_r_setting [10] = {0xD0, 0x00, 0x01, 0x64, 0x25, 0x07, 0x02, 0x61, 0x13, 0x03};
static char n_gamma_r_setting [10] = {0xD1, 0x00, 0x01, 0x64, 0x25, 0x07, 0x02, 0x61, 0x13, 0x03};
static char p_gamma_g_setting [10] = {0xD2, 0x00, 0x01, 0x64, 0x25, 0x07, 0x02, 0x61, 0x13, 0x03};
static char n_gamma_g_setting [10] = {0xD3, 0x00, 0x01, 0x64, 0x25, 0x07, 0x02, 0x61, 0x13, 0x03};
static char p_gamma_b_setting [10] = {0xD4, 0x00, 0x01, 0x64, 0x25, 0x07, 0x02, 0x61, 0x13, 0x03};
static char n_gamma_b_setting [10] = {0xD5, 0x00, 0x01, 0x64, 0x25, 0x07, 0x02, 0x61, 0x13, 0x03};
static char exit_sleep [ 1] = {0x11};
static char display_on [ 1] = {0x29};
static char enter_sleep [ 1] = {0x10};
static char display_off [ 1] = {0x28};
/* LGE_CHANGE_E LG-4573B H/W Rev.C or upper revision, jamin.koo@lge.com, 2011.02.27 */
/* LG-4572B only for Rev.A and Rev.B */
/* initialize device */
static struct dsi_cmd_desc lgit_power_on_set_old[] = {
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(video_switch), video_switch},
#endif
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(hrx_to_old), hrx_to_old},
{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(inversion_off_old), inversion_off_old},
{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(tear_on_old), tear_on_old},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_address_mode_old), set_address_mode_old},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(if_pixel_format_old), if_pixel_format_old},
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(rgb_interface_setting_old), rgb_interface_setting_old},
#endif
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(page_address_set_old), page_address_set_old},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(panel_char_setting_old), panel_char_setting_old},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(panel_drive_setting_old), panel_drive_setting_old},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(display_mode_ctrl_old), display_mode_ctrl_old},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(display_ctrl1_old), display_ctrl1_old},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(display_ctrl2_old), display_ctrl2_old},
#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL)
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(display_ctrl3_old), display_ctrl3_old},
#endif
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(osc_setting_old), osc_setting_old},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(power_ctrl3_old), power_ctrl3_old},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(power_ctrl4_old), power_ctrl4_old},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(power_ctrl5_old), power_ctrl5_old},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(power_ctrl6_old), power_ctrl6_old},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(p_gamma_r_setting), p_gamma_r_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(n_gamma_r_setting), n_gamma_r_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(p_gamma_g_setting), p_gamma_g_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(n_gamma_g_setting), n_gamma_g_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(p_gamma_b_setting), p_gamma_b_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(n_gamma_b_setting), n_gamma_b_setting},
{DTYPE_DCS_WRITE, 1, 0, 0, 100, sizeof(exit_sleep_old), exit_sleep_old},
{DTYPE_DCS_WRITE, 1, 0, 0, 100, sizeof(display_on_old), display_on_old},
};
static struct dsi_cmd_desc lgit_power_off_set_old[] = {
{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(display_off_old), display_off_old},
{DTYPE_DCS_WRITE, 1, 0, 0, 60, sizeof(enter_sleep_old), enter_sleep_old},
{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(deep_standby_old), deep_standby_old},
};
/* LGE_CHANGE_S LG-4573B H/W Rev.C or upper revision, jamin.koo@lge.com, 2011.02.27 */
/* initialize device */
static struct dsi_cmd_desc lgit_power_on_set[] = {
{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(hrx_to), hrx_to},
{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(inversion_off), inversion_off},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_address_mode), set_address_mode},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(if_pixel_format), if_pixel_format},
/* LGE_CHANGE_S, Add CABC Code, jamin.koo@lge.com, 2012.03.30 */
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(cabc_51), cabc_51},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(cabc_53), cabc_53},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(cabc_55), cabc_55},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(cabc_5e), cabc_5e},
#endif /* CONFIG_LGE_BACKLIGHT_CABC */
/* LGE_CHANGE_E, Add CABC Code, jamin.koo@lge.com, 2012.03.30 */
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(rgb_interface_setting), rgb_interface_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(panel_char_setting), panel_char_setting},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(panel_drive_setting), panel_drive_setting},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(display_mode_ctrl), display_mode_ctrl},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(display_ctrl1), display_ctrl1},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(display_ctrl2), display_ctrl2},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(osc_setting), osc_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(power_ctrl3), power_ctrl3},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(power_ctrl4), power_ctrl4},
{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(power_ctrl5), power_ctrl5},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(power_ctrl6), power_ctrl6},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(p_gamma_r_setting), p_gamma_r_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(n_gamma_r_setting), n_gamma_r_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(p_gamma_g_setting), p_gamma_g_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(n_gamma_g_setting), n_gamma_g_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(p_gamma_b_setting), p_gamma_b_setting},
{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(n_gamma_b_setting), n_gamma_b_setting},
{DTYPE_DCS_WRITE, 1, 0, 0, 120, sizeof(exit_sleep), exit_sleep},
{DTYPE_DCS_WRITE, 1, 0, 0, 40, sizeof(display_on), display_on},
};
/* LGE_CHANGE_E LG-4573B H/W Rev.C or upper revision, jamin.koo@lge.com, 2011.02.27 */
static struct dsi_cmd_desc lgit_power_off_set[] = {
{DTYPE_DCS_WRITE, 1, 0, 0, 40, sizeof(display_off), display_off},
{DTYPE_DCS_WRITE, 1, 0, 0, 10, sizeof(enter_sleep), enter_sleep},
};
#if defined(CONFIG_LGE_BACKLIGHT_CABC) && \
defined(CONFIG_LGE_BACKLIGHT_CABC_DEBUG)
void set_lgit_cabc(int cabc_index)
{
pr_info("%s! cabc_index: %d\n", __func__, cabc_index);
switch(cabc_index) {
case 0: /* CABC OFF */
lgit_power_on_set[CABC_POWERON_OFFSET+2].payload = config_cabc_55[CABC_OFF];
break;
case 1: /* 10% */
case 2: /* 20% */
case 3: /* 30% */
case 4: /* 40% */
case 5: /* 50% */
{ /* CABC ON */
lgit_power_on_set[CABC_POWERON_OFFSET].payload = config_cabc_51[cabc_index];
lgit_power_on_set[CABC_POWERON_OFFSET+1].payload = config_cabc_53[CABC_ON];
lgit_power_on_set[CABC_POWERON_OFFSET+2].payload = config_cabc_55[CABC_ON];
lgit_power_on_set[CABC_POWERON_OFFSET+3].payload = config_cabc_5e[cabc_index];
}
break;
default:
printk("out of range cabc_index %d", cabc_index);
return;
}
lgit_cabc_index = cabc_index;
return;
}
EXPORT_SYMBOL(set_lgit_cabc);
int get_lgit_cabc(void)
{
return lgit_cabc_index;
}
EXPORT_SYMBOL(get_lgit_cabc);
#endif /* CONFIG_LGE_BACKLIGHT_CABC && CONFIG_LGE_BACKLIGHT_CABC_DEBUG */
/* LG-4572B only for Rev.A and Rev.B */
static struct msm_panel_common_pdata mipi_lgit_pdata_old = {
.backlight_level = mipi_lgit_backlight_level,
#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
.power_on_set = lgit_power_on_set_old,
.power_off_set = lgit_power_off_set_old,
.power_on_set_size = ARRAY_SIZE(lgit_power_on_set_old),
.power_off_set_size = ARRAY_SIZE(lgit_power_off_set_old),
.max_backlight_level = 0xFF,
#endif
#if defined (CONFIG_LGE_BACKLIGHT_LM3530)
.max_backlight_level = 0x71,
#elif defined (CONFIG_LGE_BACKLIGHT_LM3533)
.max_backlight_level = 0xFF,
#endif
};
/* LGE_CHANGE_S LG-4573B H/W Rev.C or upper revision, jamin.koo@lge.com, 2011.02.27 */
static struct msm_panel_common_pdata mipi_lgit_pdata = {
.backlight_level = mipi_lgit_backlight_level,
#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) ||\
defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
.power_on_set = lgit_power_on_set,
.power_off_set = lgit_power_off_set,
.power_on_set_size = ARRAY_SIZE(lgit_power_on_set),
.power_off_set_size = ARRAY_SIZE(lgit_power_off_set),
.max_backlight_level = 0xFF,
#endif
/* LGE_CHANGE_E LG-4573B H/W Rev.C or upper revision, jamin.koo@lge.com, 2011.02.27 */
#if defined (CONFIG_LGE_BACKLIGHT_LM3530)
.max_backlight_level = 0x71,
#elif defined (CONFIG_LGE_BACKLIGHT_LM3533)
.max_backlight_level = 0xFF,
#endif
};
static struct platform_device mipi_dsi_lgit_panel_device = {
.name = "mipi_lgit",
.id = 0,
.dev = {
.platform_data = &mipi_lgit_pdata,
}
};
#endif
#endif
#ifdef CONFIG_LGE_KCAL
extern int set_kcal_values(int kcal_r, int kcal_g, int kcal_b);
extern int refresh_kcal_display(void);
extern int get_kcal_values(int *kcal_r, int *kcal_g, int *kcal_b);
static struct kcal_platform_data kcal_pdata = {
.set_values = set_kcal_values,
.get_values = get_kcal_values,
.refresh_display = refresh_kcal_display
};
static struct platform_device kcal_platrom_device = {
.name = "kcal_ctrl",
.dev = {
.platform_data = &kcal_pdata,
}
};
#endif
#endif
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
static struct platform_device wfd_panel_device = {
.name = "wfd_panel",
.id = 0,
.dev.platform_data = NULL,
};
static struct platform_device wfd_device = {
.name = "msm_wfd",
.id = -1,
};
#endif
#ifdef CONFIG_MSM_BUS_SCALING
static struct msm_bus_vectors dtv_bus_init_vectors[] = {
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors dtv_bus_def_vectors[] = {
{
.src = MSM_BUS_MASTER_MDP_PORT0,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 566092800 * 2,
.ib = 707616000 * 2,
},
};
static struct msm_bus_paths dtv_bus_scale_usecases[] = {
{
ARRAY_SIZE(dtv_bus_init_vectors),
dtv_bus_init_vectors,
},
{
ARRAY_SIZE(dtv_bus_def_vectors),
dtv_bus_def_vectors,
},
};
static struct msm_bus_scale_pdata dtv_bus_scale_pdata = {
dtv_bus_scale_usecases,
ARRAY_SIZE(dtv_bus_scale_usecases),
.name = "dtv",
};
static struct lcdc_platform_data dtv_pdata = {
.bus_scale_table = &dtv_bus_scale_pdata,
};
#endif
static struct gpiomux_setting mdp_vsync_suspend_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_DOWN,
};
static struct gpiomux_setting mdp_vsync_active_cfg = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_DOWN,
};
static struct msm_gpiomux_config msm8960_mdp_vsync_configs[] __initdata = {
{
.gpio = MDP_VSYNC_GPIO,
.settings = {
[GPIOMUX_ACTIVE] = &mdp_vsync_active_cfg,
[GPIOMUX_SUSPENDED] = &mdp_vsync_suspend_cfg,
},
}
};
#ifdef CONFIG_LGE_HIDDEN_RESET
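/* Expose the framebuffer's physical address and size to the hidden-reset
 * handler. */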
int lge_get_fb_phys_info(unsigned long *start, unsigned long *size)
{
if (!start || !size)
return -1;
*start = (unsigned long)msm_fb_resources[0].start;
*size = (unsigned long)(LCD_RESOLUTION_X * LCD_RESOLUTION_Y * 4);
return 0;
}
void *lge_get_hreset_fb_phys_addr(void)
{
return (void *)0x88A00000;
}
#endif
static void __init msm_fb_add_devices(void)
{
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
platform_device_register(&wfd_panel_device);
platform_device_register(&wfd_device);
#endif
if (machine_is_msm8x60_rumi3()) {
msm_fb_register_device("mdp", NULL);
mipi_dsi_pdata.target_type = 1;
} else
msm_fb_register_device("mdp", &mdp_pdata);
msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata);
#ifdef CONFIG_MSM_BUS_SCALING
msm_fb_register_device("dtv", &dtv_pdata);
#endif
}
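/* Allocate the framebuffer from bootmem and publish its physical address
 * range through msm_fb_resources[]. */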
void __init msm8960_allocate_fb_region(void)
{
void *addr;
unsigned long size;
size = MSM_FB_SIZE;
addr = alloc_bootmem_align(size, 0x1000);
msm_fb_resources[0].start = __pa(addr);
msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
size, addr, __pa(addr));
}
void __init msm8960_set_display_params(char *prim_panel, char *ext_panel)
{
if (strnlen(prim_panel, PANEL_NAME_MAX_LEN)) {
strlcpy(msm_fb_pdata.prim_panel_name, prim_panel,
PANEL_NAME_MAX_LEN);
pr_debug("msm_fb_pdata.prim_panel_name %s\n",
msm_fb_pdata.prim_panel_name);
if (!strncmp((char *)msm_fb_pdata.prim_panel_name,
HDMI_PANEL_NAME, strnlen(HDMI_PANEL_NAME,
PANEL_NAME_MAX_LEN))) {
pr_debug("HDMI is the primary display by"
" boot parameter\n");
hdmi_is_primary = 1;
}
}
if (strnlen(ext_panel, PANEL_NAME_MAX_LEN)) {
strlcpy(msm_fb_pdata.ext_panel_name, ext_panel,
PANEL_NAME_MAX_LEN);
pr_debug("msm_fb_pdata.ext_panel_name %s\n",
msm_fb_pdata.ext_panel_name);
}
}
#ifdef CONFIG_I2C
#ifdef CONFIG_LGE_BACKLIGHT_LM3533
#define LM3533_BACKLIGHT_ADDRESS 0x36
struct backlight_platform_data {
void (*platform_init)(void);
int gpio;
unsigned int mode;
int max_current;
int init_on_boot;
int min_brightness;
int max_brightness;
int default_brightness;
int factory_brightness;
};
#if defined(CONFIG_LGE_BACKLIGHT_CABC)
#define PWM_SIMPLE_EN 0xA0
#endif
static struct backlight_platform_data lm3533_data = {
.gpio = PM8921_GPIO_PM_TO_SYS(24),
.max_current = 0x13,
.min_brightness = 0x05,
.max_brightness = 0xFF,
.default_brightness = 0x91,
.factory_brightness = 0x64,
};
static struct i2c_board_info msm_i2c_backlight_info[] = {
{
I2C_BOARD_INFO("lm3533", LM3533_BACKLIGHT_ADDRESS),
.platform_data = &lm3533_data,
}
};
static struct i2c_registry l_dcm_i2c_backlight_device __initdata = {
I2C_SURF | I2C_FFA | I2C_FLUID | I2C_RUMI,
MSM_8960_GSBI2_QUP_I2C_BUS_ID,
msm_i2c_backlight_info,
ARRAY_SIZE(msm_i2c_backlight_info),
};
#endif /* CONFIG_LGE_BACKLIGHT_LM3533 */
#endif /* CONFIG_I2C */
static int __init panel_gpiomux_init(void)
{
int rc;
rc = msm_gpiomux_init(NR_GPIO_IRQS);
if (rc == -EPERM) {
pr_info("%s : msm_gpiomux_init is already initialized\n",
__func__);
} else if (rc) {
pr_err(KERN_ERR "msm_gpiomux_init failed %d\n", rc);
return rc;
}
msm_gpiomux_install(msm8960_mdp_vsync_configs,
ARRAY_SIZE(msm8960_mdp_vsync_configs));
return 0;
}
static struct platform_device *l_dcm_panel_devices[] __initdata = {
#ifdef CONFIG_FB_MSM_MIPI_DSI_LGIT
&mipi_dsi_lgit_panel_device,
#ifdef CONFIG_LGE_LCD_TUNING
&lcd_misc_device,
#endif
#endif
#ifdef CONFIG_LGE_KCAL
&kcal_platrom_device,
#endif
};
void __init lge_add_lcd_devices(void)
{
panel_gpiomux_init();
fb_register_client(&msm_fb_event_notifier);
/* LGE_CHANGE_S, Assign command set to panel info as H/W revision, jamin.koo@lge.com, 2011.02.27 */
if(lge_get_board_revno() < HW_REV_C)
mipi_dsi_lgit_panel_device.dev.platform_data = &mipi_lgit_pdata_old;
/* LGE_CHANGE_E, Assign command set to panel info as H/W revision, jamin.koo@lge.com, 2011.02.27 */
platform_add_devices(l_dcm_panel_devices,
ARRAY_SIZE(l_dcm_panel_devices));
#ifdef CONFIG_LGE_BACKLIGHT_LM3533
lge_add_msm_i2c_device(&l_dcm_i2c_backlight_device);
#endif
msm_fb_add_devices();
platform_device_register(&msm_fb_device);
}
| jameskdev/lge-kernel-d1l_kr | arch/arm/mach-msm/lge/l_dcm/board-l_dcm-panel.c | C | gpl-2.0 | 35,276 |
/*
* Copyright (c) 2009, Yauhen Kharuzhy <jekhor@gmail.com>
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <command.h>
#include <mmc.h>
#include <part.h>
#include <fat.h>
#include <firmware_update.h>
#include <linux/mtd/mtd.h>
#include <nand.h>
#include <linux/mtd/partitions.h>
#include <linux/list.h>
#include <ubi_uboot.h>
#include <jffs2/load_kernel.h>
#include <i2c.h>
#include <lcd.h>
#include <linux/time.h>
#ifdef crc32
#undef crc32
#endif
#define N516_KEY_C 0x1d
#define N516_KEY_MENU 0x0e
#define N516_KEY_POWER 0x1c
#define N516_KEY_1 0x04
#define KEY_RESERVED 0
#define KEY_ESC 1
#define KEY_1 2
#define KEY_2 3
#define KEY_3 4
#define KEY_4 5
#define KEY_5 6
#define KEY_6 7
#define KEY_7 8
#define KEY_8 9
#define KEY_9 10
#define KEY_0 11
#define KEY_ENTER 28
#define KEY_SPACE 57
#define KEY_UP 103
#define KEY_PAGEUP 104
#define KEY_LEFT 105
#define KEY_RIGHT 106
#define KEY_DOWN 108
#define KEY_PAGEDOWN 109
#define KEY_POWER 116
#define KEY_MENU 139
#define KEY_SLEEP 142
#define KEY_WAKEUP 143
#define KEY_DIRECTION 153
#define KEY_PLAYPAUSE 164
#define KEY_SEARCH 217
struct fw_update_head {
char magic[4];
u32 header_size;
} __attribute__((packed));
struct block_properties {
unsigned int raw:1;
u32 crc32;
char *name;
u64 offset;
u64 size;
};
static struct ubi_device *ubi;
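/* Helpers to read little-endian values from possibly unaligned buffers
 * inside the firmware image. */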
static u32 __get_unaligned_le32(unsigned char *p)
{
return (u32)p[0] | ((u32)p[1] << 8) | ((u32)p[2] << 16) | ((u32)p[3] << 24);
}
static u64 __get_unaligned_le64(unsigned char *p)
{
return (u64)p[0] | ((u64)p[1] << 8) | ((u64)p[2] << 16) | ((u64)p[3] << 24) |
((u64)p[4] << 32) | ((u64)p[5] << 40) | ((u64)p[6] << 48) | ((u64)p[7] << 56);
}
#define log(msg, args...) \
do { \
eprintf(msg, ##args); \
printf(msg, ##args); \
} while (0)
#define show_progress(msg, args...) \
do { \
printf("\r" msg "\n", ##args); \
} while (0)
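/*
 * Attach UBI to the NAND partition named by CONFIG_UBI_PARTITION.
 * If a UBI device is already attached (ubi_devices[0]), reuse it
 * instead of re-parsing mtdparts and re-running ubi_init().
 */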
static int ubi_initialize(void)
{
struct mtd_info *master;
struct mtd_device *dev;
struct mtd_partition mtd_part;
struct part_info *part;
char buffer[20];
u8 pnum;
int err;
if (ubi_devices[0]) {
ubi = ubi_devices[0];
return 0;
// ubi_exit();
// del_mtd_partitions(&nand_info[0]);
}
if (mtdparts_init() != 0) {
printf("Error initializing mtdparts!\n");
return 1;
}
master = &nand_info[0];
if (find_dev_and_part(CONFIG_UBI_PARTITION, &dev, &pnum, &part) != 0)
return 1;
sprintf(buffer, "mtd=%d", pnum);
memset(&mtd_part, 0, sizeof(mtd_part));
mtd_part.name = buffer;
mtd_part.size = part->size;
mtd_part.offset = part->offset;
add_mtd_partitions(master, &mtd_part, 1);
err = ubi_mtd_param_parse(buffer, NULL);
if (err) {
del_mtd_partitions(master);
return err;
}
err = ubi_init();
if (err) {
del_mtd_partitions(master);
return err;
}
ubi = ubi_devices[0];
return 0;
}
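/*
 * Initialize MMC 0 and register partition 1 as the FAT device that
 * update files are read from.
 */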
static int init_fat(void)
{
block_dev_desc_t *dev_desc;
int part = 1;
struct mmc *mmc;
mmc = find_mmc_device(0);
if (!mmc)
return -1;
if (mmc_init(mmc))
return -1;
dev_desc = get_dev("mmc", 0);
if (dev_desc==NULL) {
printf("\nERROR: Invalid mmc device. Please check your SD/MMC card.\n");
return -1;
}
if (fat_register_device(dev_desc, part)!=0) {
printf("\nERROR: Unable to use %s %d:%d for update. Please check or replace your card.\n", "mmc", 0, part);
return -1;
}
return 0;
}
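/*
 * Read 'size' bytes starting at 'offset' from 'filename' on the FAT
 * partition into 'buf'. Returns the number of bytes read, or a
 * negative value on error.
 */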
static int fw_load(char *filename, unsigned char *buf, unsigned long size, unsigned long offset)
{
if (init_fat() < 0)
return -1;
printf("Reading file %s (0x%lx bytes from offset 0x%lx)\n", filename, size, offset);
return file_fat_read(filename, buf, size, offset);
}
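/*
 * Validate a global header property. "device", "hwrev" and "epoch"
 * must match the board configuration; "description" and "date" are
 * only printed. A non-zero return marks the image as invalid.
 */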
static int check_global_property(char *name, char *val, unsigned long len)
{
long t;
char date[32];
if (!strcmp(name, "device")) {
if (strcmp(val, CONFIG_BOARD_NAME))
return -1;
} else if (!strcmp(name, "hwrev")) {
if (strcmp(val, CONFIG_BOARD_HWREV))
return -1;
} else if (!strcmp(name, "description")) {
log("Description:\n %s\n", val);
} else if (!strcmp(name, "date")) {
t = simple_strtoul(val, NULL, 10);
ctime_r(&t, date);
log("Firmware date:\n %s\n", date);
} else if (!strcmp(name, "epoch")) {
if (strcmp(val, CONFIG_FIRMWARE_EPOCH))
return -1;
}
return 0;
}
static int check_block_property(char *block_name, char *name, char *val,
unsigned long len, struct block_properties *block_prop)
{
if (!strcmp(name, "raw") && !strcmp(val, "yes"))
block_prop->raw = 1;
if (!strcmp(name, "crc32"))
block_prop->crc32 = __get_unaligned_le32((unsigned char *)val);
return 0;
}
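/*
 * Walk a list of length-prefixed property name/value pairs starting at
 * 'start' (u32 name_len, u32 val_len, name, value, ...), terminated by
 * a zero name length. Global properties (block_name == NULL) and block
 * properties are checked separately; any failed check clears
 * *image_valid. Returns the number of bytes consumed.
 */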
static unsigned long process_all_properties(unsigned char *start, char *block_name,
int dry_run, int *image_valid, struct block_properties *block_prop)
{
u32 property_name_len;
u32 property_val_len;
char *property_name, *property_val;
unsigned char *buf = start;
int res;
property_name_len = __get_unaligned_le32(buf);
buf += 4;
property_val_len = __get_unaligned_le32(buf);
buf += 4;
while (property_name_len) {
property_name = (char *)buf;
buf += property_name_len;
property_val = (char *)buf;
buf += property_val_len;
if ((property_name[property_name_len - 1] == '\0') &&
strncmp(property_name, "crc32", 5))
printf("%s: %s\n", property_name, property_val);
else {
int i;
printf("%s:", property_name);
for (i = 0; i < property_val_len; i++)
printf(" %02x", (u8)property_val[i]);
puts("\n");
}
if (!block_name)
res = check_global_property(property_name, property_val,
property_val_len);
else
res = check_block_property(block_name, property_name, property_val,
property_val_len, block_prop);
if (res != 0)
*image_valid = 0;
property_name_len = __get_unaligned_le32(buf);
buf += 4;
property_val_len = __get_unaligned_le32(buf);
buf += 4;
}
return buf - start;
}
static struct update_layout_entry *get_block_flash_layout(char *block_name)
{
int i;
for (i = 0; i < ARRAY_SIZE(nand_layout); i++) {
if (!strcmp(nand_layout[i].name, block_name))
return &nand_layout[i];
}
return NULL;
}
static int flash_chunk(u64 offset, unsigned char *buf, size_t count)
{
nand_info_t *nand;
#ifdef CONFIG_SYS_NAND_SELECT_DEVICE
board_nand_select_device(nand_info[0].priv, 0);
#endif
nand = &nand_info[0];
count = roundup(count, nand->writesize);
printf("Flashing chunk to offset 0x%08x, count 0x%x...\n", (u32)offset, count);
return nand_write_skip_bad(nand, offset, &count, buf);
}
static int erase_flash(u64 offset, u64 size)
{
nand_erase_options_t opts;
nand_info_t *nand;
#ifdef CONFIG_SYS_NAND_SELECT_DEVICE
board_nand_select_device(nand_info[0].priv, 0);
#endif
nand = &nand_info[0];
memset(&opts, 0, sizeof(opts));
opts.offset = offset;
opts.length = size;
opts.jffs2 = 0;
opts.quiet = 0;
opts.scrub = 0;
return nand_erase_opts(nand, &opts);
}
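/*
 * Flash a raw block: look up its NAND offset/size in nand_layout,
 * erase that range, then stream the block from the update file in
 * CONFIG_UPDATE_CHUNKSIZE pieces, accumulating a CRC32 that is
 * compared against the value from the block properties.
 */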
static int process_block_raw(char *filename, struct block_properties *block_prop, int dry_run)
{
unsigned char *buf = (unsigned char *)CONFIG_UPDATE_TMPBUF;
u64 bytes_read = 0, bytes_remain = block_prop->size;
struct update_layout_entry *layout;
u64 flash_address;
u32 block_crc32;
layout = get_block_flash_layout(block_prop->name);
if (!layout) {
log("Cannot find layout for block '%s', skipping it\n", block_prop->name);
return 1;
}
log("Flashing `%s'", block_prop->name);
show_progress("Erasing flash...");
if (!dry_run)
erase_flash(layout->offset, layout->size);
else
printf("Not erasing flash (dry run)\n");
flash_address = layout->offset;
block_crc32 = 0;
while (bytes_remain) {
unsigned long chunksize = min(CONFIG_UPDATE_CHUNKSIZE, bytes_remain);
long res;
if (bytes_remain < CONFIG_UPDATE_CHUNKSIZE)
memset(buf, 0xff, CONFIG_UPDATE_CHUNKSIZE);
show_progress("Reading...\t(%u%%)", (unsigned int)(bytes_read * 100 / block_prop->size));
res = fw_load(filename, buf, chunksize, block_prop->offset + bytes_read);
if (res < 0)
{
log("\nFailed to read file %s\n", filename);
return -1;
}
block_crc32 = crc32(block_crc32, buf, chunksize);
bytes_read += res;
bytes_remain -= res;
show_progress("Flashing...\t(%u%%)", (unsigned int)(bytes_read * 100 / block_prop->size));
if (!dry_run)
flash_chunk(flash_address, buf, res);
else
printf("Not flashing (dry run) chunk to offset 0x%08x...\n", (u32)flash_address);
flash_address += res;
}
log("\n");
if (block_prop->crc32 != block_crc32)
log("Invalid CRC for block %s\n", block_prop->name);
return 0;
}
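/*
 * Flash a block into the UBI volume of the same name using the
 * volume-update interface (ubi_start_update/ubi_more_update_data),
 * verifying the CRC32 from the block properties along the way.
 */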
static int process_block_ubivol(char *filename, struct block_properties *block_prop, int dry_run)
{
unsigned char *buf = (unsigned char *)CONFIG_UPDATE_TMPBUF;
u64 bytes_read = 0, bytes_remain = block_prop->size;
int i = 0, err = 0;
int rsvd_bytes = 0;
int found = 0;
struct ubi_volume *vol;
u32 block_crc32;
log("Flashing firmware part `%s':\n", block_prop->name);
if (!ubi) {
err = ubi_initialize();
if (err) {
log("ERROR: UBI initialization failed\n");
return err;
}
}
for (i = 0; i < ubi->vtbl_slots; i++) {
vol = ubi->volumes[i];
if (vol && !strcmp(vol->name, block_prop->name)) {
printf("Volume \"%s\" is found at volume id %d\n", block_prop->name, i);
found = 1;
break;
}
}
if (!found) {
log("ERROR: Volume \"%s\" is not found\n", block_prop->name);
err = 1;
goto out;
}
rsvd_bytes = vol->reserved_pebs * (ubi->leb_size - vol->data_pad);
if (block_prop->size > rsvd_bytes) {
printf("rsvd_bytes=%d vol->reserved_pebs=%d ubi->leb_size=%d\n",
rsvd_bytes, vol->reserved_pebs, ubi->leb_size);
printf("vol->data_pad=%d\n", vol->data_pad);
log("ERROR: Size of block is greater than volume size.\n");
err = -1;
goto out;
}
show_progress("Preparing...");
if (!dry_run) {
err = ubi_start_update(ubi, vol, block_prop->size);
if (err < 0) {
log("Cannot start volume update\n");
goto out;
}
}
block_crc32 = 0;
while (bytes_remain) {
unsigned long chunksize = min(CONFIG_UPDATE_CHUNKSIZE, bytes_remain);
long res;
show_progress("Reading...\t(%u%%)", (unsigned int)(bytes_read * 100 / block_prop->size));
res = fw_load(filename, buf, chunksize, block_prop->offset + bytes_read);
if (res < 0)
{
log("\nERROR: Failed to read file %s\n", filename);
return -1;
}
block_crc32 = crc32(block_crc32, buf, res);
bytes_read += res;
bytes_remain -= res;
show_progress("Flashing...\t(%u%%)", (unsigned int)(bytes_read * 100 / block_prop->size));
if (!dry_run) {
err = ubi_more_update_data(ubi, vol, buf, res);
if (err < 0) {
log("\nERROR: Failed to write data to UBI volume\n");
goto out;
}
}
}
log("\n");
if (block_prop->crc32 != block_crc32)
log("ERROR: Invalid CRC for block %s\n", block_prop->name);
if (err && !dry_run) {
err = ubi_check_volume(ubi, vol->vol_id);
if ( err < 0 )
goto out;
if (err) {
ubi_warn("volume %d on UBI device %d is corrupted",
vol->vol_id, ubi->ubi_num);
vol->corrupted = 1;
}
vol->checked = 1;
ubi_gluebi_updated(vol);
}
out:
return err;
}
static void ubi_cleanup(void)
{
ubi_exit();
del_mtd_partitions(&nand_info[0]);
}
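/*
 * Top-level update parser: load the header from 'filename', check the
 * global properties, then flash every block either to raw NAND or to a
 * UBI volume. With dry_run set, everything is parsed and read but
 * nothing is erased or written.
 */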
static int process_update(char *filename, int dry_run)
{
struct fw_update_head fw_head;
unsigned char *header;
unsigned char *p;
u32 block_name_len;
u64 block_offset, block_size;
char *block_name;
int image_valid = 1;
int ret = 0;
if (fw_load(filename, (unsigned char *)&fw_head, sizeof(fw_head), 0) < 0)
{
log("Failed to load file %s\n", filename);
return 1;
}
fw_head.header_size = __le32_to_cpu(fw_head.header_size);
header = malloc(fw_head.header_size);
if (!header) {
puts("Failed to allocate memory for firmware update header\n");
return 1;
}
if (fw_load(filename, header, fw_head.header_size, 0) < 0)
{
log("Failed to load file %s\n", filename);
free(header);
return 1;
}
p = header + sizeof(fw_head);
printf("Global properties:\n");
p += process_all_properties(p, NULL, dry_run, &image_valid, NULL);
if (!image_valid) {
log("Update image is not valid for this device\n");
ret = -1;
goto out;
}
log("\n"); /* Empty line for better output */
while (p < header + fw_head.header_size) {
struct block_properties block_prop;
block_name_len = __get_unaligned_le32(p);
p += 4;
block_offset = __get_unaligned_le64(p);
p += 8;
block_size = __get_unaligned_le64(p);
p += 8;
if (!block_name_len)
break;
memset(&block_prop, 0x00, sizeof(block_prop));
block_name = (char *)p;
p += block_name_len;
printf("Block '%s', offset: %lu, size: %lu\n", block_name,
(unsigned long)block_offset, (unsigned long)block_size);
printf("Block properties:\n");
block_prop.name = block_name;
block_prop.offset = block_offset;
block_prop.size = block_size;
p += process_all_properties(p, block_name, dry_run, &image_valid, &block_prop);
if (image_valid) {
if (block_prop.raw)
ret = process_block_raw(filename, &block_prop, dry_run);
else
ret = process_block_ubivol(filename, &block_prop, dry_run);
if (ret < 0) {
log("Error occurred during flashing block `%s'\n", block_name);
goto out;
}
} else {
printf("Block '%s' is not valid for this device, skipping it\n", block_name);
image_valid = 1;
}
}
out:
free(header);
// if (ubi)
// ubi_cleanup();
// ubi = NULL;
return ret;
}
int do_updatesim(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[])
{
return process_update(CONFIG_UPDATE_FILENAME, 1);
}
U_BOOT_CMD(
updatesim, 1, 0, do_updatesim,
"Simulate firmware update (dry run)",
" - load firmware update file from SD card and parse it without flashing\n"
);
int do_update(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[])
{
return process_update(CONFIG_UPDATE_FILENAME, 0);
}
U_BOOT_CMD(
update, 1, 0, do_update,
"Do firmware update",
" - load firmware update file from SD card, parse and flash it\n"
);
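/* Mapping of N516 LPC scan codes to Linux-style key codes. */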
static const unsigned int keymap[][2] = {
{0x05, KEY_0},
{0x04, KEY_1},
{0x03, KEY_2},
{0x02, KEY_3},
{0x01, KEY_4},
{0x0b, KEY_5},
{0x0a, KEY_6},
{0x09, KEY_7},
{0x08, KEY_8},
{0x07, KEY_9},
{0x1a, KEY_PAGEUP},
{0x19, KEY_PAGEDOWN},
{0x17, KEY_LEFT},
{0x16, KEY_RIGHT},
{0x14, KEY_UP},
{0x15, KEY_DOWN},
{0x13, KEY_ENTER},
{0x11, KEY_SPACE},
{0x0e, KEY_MENU},
{0x10, KEY_DIRECTION},
{0x0f, KEY_SEARCH},
{0x0d, KEY_PLAYPAUSE},
{0x1d, KEY_ESC},
{0x1c, KEY_POWER},
{0x1e, KEY_SLEEP},
{0x1f, KEY_WAKEUP},
};
static int find_key(unsigned char code)
{
int i;
for (i = 0; i < ARRAY_SIZE(keymap); i++) {
if (keymap[i][0] == code) {
return keymap[i][1];
}
}
return -1;
}
static int key_to_number(int key)
{
if ((key >= KEY_1) && (key <= KEY_0))
return (key - KEY_1 + 1) % 10;
else
return -1;
}
#define KEYPRESS_TIMEOUT 5000000
#define BLINK_PERIOD 300000
#define I2C_DELAY 70000
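/*
 * Switch the LPC to normal mode, then blink the LED and poll it over
 * I2C for up to KEYPRESS_TIMEOUT. Battery-level codes (0x81..0x87) are
 * stashed in the environment for the kernel. KEY_POWER writes command
 * 0x01 to the LPC and never returns; any other key returns 0 (enter
 * update mode), a timeout returns -1.
 */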
static int check_for_menu_key(void)
{
uchar code;
int key;
unsigned int t;
log(" Press any key to enter update mode\n");
/* Switch LPC to normal mode */
code = 0x02;
i2c_write(CONFIG_LPC_I2C_ADDR, 0, 0, &code, 1);
t = 0;
while (t < KEYPRESS_TIMEOUT) {
__gpio_clear_pin(GPIO_LED_EN);
udelay(BLINK_PERIOD / 2);
__gpio_set_pin(GPIO_LED_EN);
udelay(BLINK_PERIOD / 2 - I2C_DELAY);
t += BLINK_PERIOD;
key = -1;
do {
char buf[30];
if (i2c_read(CONFIG_LPC_I2C_ADDR, 0, 0, &code, 1))
break;
if ((code >= 0x81) && (code <= 0x87)) {
sprintf(buf, "n516-lpc.batt_level=%d", code - 0x81);
setenv("batt_level_param", buf);
}
key = find_key(code);
} while ((key < 0) && code);
if (key > 0)
break;
}
if (key == KEY_POWER) {
lcd_clear();
lcd_sync();
code = 0x01;
__gpio_set_pin(GPIO_LED_EN);
while (1)
i2c_write(CONFIG_LPC_I2C_ADDR, 0, 0, &code, 1);
}
if (key > 0)
return 0;
else
return -1;
}
extern void metronome_disable_sync(void);
extern void metronome_enable_sync(void);
static struct list_head found_files;
struct file_entry {
char filename[255];
struct list_head link;
};
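/*
 * dir_fat_read() callback: collect non-directory entries whose name
 * ends in CONFIG_UPDATE_FILEEXT into 'found_files', inserted in sorted
 * order.
 */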
static void file_check(char *filename, struct file_stat *stat)
{
struct file_entry *f, *cur;
char *c;
if (stat->is_directory)
return;
c = strstr(filename, CONFIG_UPDATE_FILEEXT);
if (c && *(c + sizeof(CONFIG_UPDATE_FILEEXT) - 1) == '\0') {
f = malloc(sizeof(*f));
if (!f) {
printf("Failed to allocate memory\n");
return;
}
strncpy(f->filename, filename, 254);
f->filename[254] = '\0';
if (!list_empty(&found_files)) {
list_for_each_entry(cur, &found_files, link) {
if (strcmp(cur->filename, f->filename) <= 0)
break;
}
list_add_tail(&f->link, &cur->link);
} else {
list_add_tail(&f->link, &found_files);
}
}
}
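/*
 * Show up to ten candidate update files on the display and poll the
 * LPC keypad until the user picks a number or presses C/ESC to
 * continue booting. Returns the 1-based choice, or -1.
 */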
static int ask_user(char *buf, unsigned int len)
{
uchar code;
int i, n;
int key, num;
struct file_entry *cur;
metronome_disable_sync();
lcd_clear();
eputs("Select firmware update file:\n\n");
n = 0;
list_for_each_entry(cur, &found_files, link) {
n++;
log("%d. %s\n", n, cur->filename);
if (n > 9)
break;
}
log("\nC. Exit and continue booting\n");
metronome_enable_sync();
lcd_sync();
do {
key = 0;
num = -1;
if (i2c_read(CONFIG_LPC_I2C_ADDR, 0, 0, &code, 1))
continue;
key = find_key(code);
num = key_to_number(key);
} while ((key != KEY_ESC) && (num < 1));
if (num > 0) {
i = 0;
list_for_each_entry(cur, &found_files, link) {
i++;
if (i == num) {
strncpy(buf, cur->filename, len - 1);
break;
}
}
if (i != num)
return -1;
}
return num;
}
extern void _machine_restart(void);
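/*
 * "check_and_update"/"check_and_updatesim" handler: scan the FAT root
 * for update files, wait for a key press, let the user pick a file,
 * run the update and reboot on success.
 */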
static int do_checkupdate(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[])
{
int choice;
int res;
int dry_run = 0;
char filename[255];
struct file_entry *cur, *tmp;
if (!strcmp(argv[0], "check_and_updatesim")) {
dry_run = 1;
printf("Dry run mode\n");
}
INIT_LIST_HEAD(&found_files);
if (init_fat() < 0)
return 0;
dir_fat_read("/", file_check);
if (list_empty(&found_files))
return 0;
if (check_for_menu_key()) {
res = 0;
goto out;
}
choice = ask_user(filename, 255);
if (choice < 0) {
log("Continue booting...\n");
res = 0;
goto out;
}
saveenv();
lcd_clear();
log("\tStarting update...\n\n");
res = process_update(filename, dry_run);
if (!res) {
log("\nUpdate completed successfully.\nRebooting...\n");
_machine_restart();
}
out:
list_for_each_entry_safe(cur, tmp, &found_files, link) {
list_del(&cur->link);
free(cur);
}
return res;
}
U_BOOT_CMD(
check_and_update, 1, 0, do_checkupdate,
"Check for firmware update, ask user and start\n",
NULL
);
U_BOOT_CMD(
check_and_updatesim, 1, 0, do_checkupdate,
"Check for firmware update, ask user and start update simulation\n",
NULL
);
| OpenInkpot-archive/uboot-n516 | common/cmd_update.c | C | gpl-2.0 | 19,636 |
/*
* A framebuffer driver for VBE 2.0+ compliant video cards
*
* (c) 2007 Michal Januszewski <spock@gentoo.org>
* Loosely based upon the vesafb driver.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/connector.h>
#include <linux/random.h>
#include <linux/platform_device.h>
#include <linux/limits.h>
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <video/edid.h>
#include <video/uvesafb.h>
#ifdef CONFIG_X86
#include <video/vga.h>
#include <linux/pci.h>
#endif
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include "edid.h"
static struct cb_id uvesafb_cn_id = {
.idx = CN_IDX_V86D,
.val = CN_VAL_V86D_UVESAFB
};
static char v86d_path[PATH_MAX] = "/sbin/v86d";
static char v86d_started; /* has v86d been started by uvesafb? */
static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
.id = "VESA VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
.visual = FB_VISUAL_TRUECOLOR,
};
static int mtrr __devinitdata = 3; /* enable mtrr by default */
static int blank = 1; /* enable blanking by default */
static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
static bool pmi_setpal __devinitdata = true; /* use PMI for palette changes */
static int nocrtc __devinitdata; /* ignore CRTC settings */
static int noedid __devinitdata; /* don't try DDC transfers */
static int vram_remap __devinitdata; /* set amt. of memory to be used */
static int vram_total __devinitdata; /* set total amount of memory */
static u16 maxclk __devinitdata; /* maximum pixel clock */
static u16 maxvf __devinitdata; /* maximum vertical frequency */
static u16 maxhf __devinitdata; /* maximum horizontal frequency */
static u16 vbemode __devinitdata; /* force use of a specific VBE mode */
static char *mode_option __devinitdata;
static u8 dac_width = 6;
static struct uvesafb_ktask *uvfb_tasks[UVESAFB_TASKS_MAX];
static DEFINE_MUTEX(uvfb_lock);
/*
* A handler for replies from userspace.
*
* Make sure each message passes consistency checks and if it does,
* find the kernel part of the task struct, copy the registers and
* the buffer contents and then complete the task.
*/
static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
struct uvesafb_task *utask;
struct uvesafb_ktask *task;
if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
return;
if (msg->seq >= UVESAFB_TASKS_MAX)
return;
mutex_lock(&uvfb_lock);
task = uvfb_tasks[msg->seq];
if (!task || msg->ack != task->ack) {
mutex_unlock(&uvfb_lock);
return;
}
utask = (struct uvesafb_task *)msg->data;
/* Sanity checks for the buffer length. */
if (task->t.buf_len < utask->buf_len ||
utask->buf_len > msg->len - sizeof(*utask)) {
mutex_unlock(&uvfb_lock);
return;
}
uvfb_tasks[msg->seq] = NULL;
mutex_unlock(&uvfb_lock);
memcpy(&task->t, utask, sizeof(*utask));
if (task->t.buf_len && task->buf)
memcpy(task->buf, utask + 1, task->t.buf_len);
complete(task->done);
return;
}
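/* Spawn the v86d userspace helper that executes our VBE requests. */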
static int uvesafb_helper_start(void)
{
char *envp[] = {
"HOME=/",
"PATH=/sbin:/bin",
NULL,
};
char *argv[] = {
v86d_path,
NULL,
};
return call_usermodehelper(v86d_path, argv, envp, 1);
}
/*
* Execute a uvesafb task.
*
* Returns 0 if the task is executed successfully.
*
* A message sent to the userspace consists of the uvesafb_task
* struct and (optionally) a buffer. The uvesafb_task struct is
* a simplified version of uvesafb_ktask (its kernel counterpart)
* containing only the register values, flags and the length of
* the buffer.
*
* Each message is assigned a sequence number (increased linearly)
* and a random ack number. The sequence number is used as a key
* for the uvfb_tasks array which holds pointers to uvesafb_ktask
* structs for all requests.
*/
static int uvesafb_exec(struct uvesafb_ktask *task)
{
static int seq;
struct cn_msg *m;
int err;
int len = sizeof(task->t) + task->t.buf_len;
/*
* Check whether the message isn't longer than the maximum
* allowed by connector.
*/
if (sizeof(*m) + len > CONNECTOR_MAX_MSG_SIZE) {
printk(KERN_WARNING "uvesafb: message too long (%d), "
"can't execute task\n", (int)(sizeof(*m) + len));
return -E2BIG;
}
m = kzalloc(sizeof(*m) + len, GFP_KERNEL);
if (!m)
return -ENOMEM;
init_completion(task->done);
memcpy(&m->id, &uvesafb_cn_id, sizeof(m->id));
m->seq = seq;
m->len = len;
m->ack = random32();
/* uvesafb_task structure */
memcpy(m + 1, &task->t, sizeof(task->t));
/* Buffer */
memcpy((u8 *)(m + 1) + sizeof(task->t), task->buf, task->t.buf_len);
/*
* Save the message ack number so that we can find the kernel
* part of this task when a reply is received from userspace.
*/
task->ack = m->ack;
mutex_lock(&uvfb_lock);
/* If all slots are taken -- bail out. */
if (uvfb_tasks[seq]) {
mutex_unlock(&uvfb_lock);
err = -EBUSY;
goto out;
}
/* Save a pointer to the kernel part of the task struct. */
uvfb_tasks[seq] = task;
mutex_unlock(&uvfb_lock);
err = cn_netlink_send(m, 0, GFP_KERNEL);
if (err == -ESRCH) {
/*
* Try to start the userspace helper if sending
* the request failed the first time.
*/
err = uvesafb_helper_start();
if (err) {
printk(KERN_ERR "uvesafb: failed to execute %s\n",
v86d_path);
printk(KERN_ERR "uvesafb: make sure that the v86d "
"helper is installed and executable\n");
} else {
v86d_started = 1;
err = cn_netlink_send(m, 0, gfp_any());
if (err == -ENOBUFS)
err = 0;
}
} else if (err == -ENOBUFS)
err = 0;
if (!err && !(task->t.flags & TF_EXIT))
err = !wait_for_completion_timeout(task->done,
msecs_to_jiffies(UVESAFB_TIMEOUT));
mutex_lock(&uvfb_lock);
uvfb_tasks[seq] = NULL;
mutex_unlock(&uvfb_lock);
seq++;
if (seq >= UVESAFB_TASKS_MAX)
seq = 0;
out:
kfree(m);
return err;
}
/*
* Free a uvesafb_ktask struct.
*/
static void uvesafb_free(struct uvesafb_ktask *task)
{
if (task) {
if (task->done)
kfree(task->done);
kfree(task);
}
}
/*
* Prepare a uvesafb_ktask struct to be used again.
*/
static void uvesafb_reset(struct uvesafb_ktask *task)
{
struct completion *cpl = task->done;
memset(task, 0, sizeof(*task));
task->done = cpl;
}
/*
* Allocate and prepare a uvesafb_ktask struct.
*/
static struct uvesafb_ktask *uvesafb_prep(void)
{
struct uvesafb_ktask *task;
task = kzalloc(sizeof(*task), GFP_KERNEL);
if (task) {
task->done = kzalloc(sizeof(*task->done), GFP_KERNEL);
if (!task->done) {
kfree(task);
task = NULL;
}
}
return task;
}
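/*
 * Fill an fb_var_screeninfo from a VBE mode info block: resolution, a
 * virtual yres sized for panning, and the per-channel color bitfields
 * taken from the mode (a fixed 8/8/8 layout is used for palette modes).
 */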
static void uvesafb_setup_var(struct fb_var_screeninfo *var,
struct fb_info *info, struct vbe_mode_ib *mode)
{
struct uvesafb_par *par = info->par;
var->vmode = FB_VMODE_NONINTERLACED;
var->sync = FB_SYNC_VERT_HIGH_ACT;
var->xres = mode->x_res;
var->yres = mode->y_res;
var->xres_virtual = mode->x_res;
var->yres_virtual = (par->ypan) ?
info->fix.smem_len / mode->bytes_per_scan_line :
mode->y_res;
var->xoffset = 0;
var->yoffset = 0;
var->bits_per_pixel = mode->bits_per_pixel;
if (var->bits_per_pixel == 15)
var->bits_per_pixel = 16;
if (var->bits_per_pixel > 8) {
var->red.offset = mode->red_off;
var->red.length = mode->red_len;
var->green.offset = mode->green_off;
var->green.length = mode->green_len;
var->blue.offset = mode->blue_off;
var->blue.length = mode->blue_len;
var->transp.offset = mode->rsvd_off;
var->transp.length = mode->rsvd_len;
} else {
var->red.offset = 0;
var->green.offset = 0;
var->blue.offset = 0;
var->transp.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
}
}
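/*
 * Return the index of the VBE mode that most closely matches
 * xres/yres/depth. UVESAFB_EXACT_DEPTH and UVESAFB_EXACT_RES make the
 * match stricter; -1 is returned if no acceptable mode exists.
 */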
static int uvesafb_vbe_find_mode(struct uvesafb_par *par,
int xres, int yres, int depth, unsigned char flags)
{
int i, match = -1, h = 0, d = 0x7fffffff;
for (i = 0; i < par->vbe_modes_cnt; i++) {
h = abs(par->vbe_modes[i].x_res - xres) +
abs(par->vbe_modes[i].y_res - yres) +
abs(depth - par->vbe_modes[i].depth);
/*
* We have an exact match in terms of resolution
* and depth.
*/
if (h == 0)
return i;
if (h < d || (h == d && par->vbe_modes[i].depth > depth)) {
d = h;
match = i;
}
}
i = 1;
if (flags & UVESAFB_EXACT_DEPTH &&
par->vbe_modes[match].depth != depth)
i = 0;
if (flags & UVESAFB_EXACT_RES && d > 24)
i = 0;
if (i != 0)
return match;
else
return -1;
}
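/*
 * Save the complete hardware state (VBE function 0x4f04, CL = 0x0f)
 * into a freshly allocated buffer of par->vbe_state_size bytes.
 */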
static u8 *uvesafb_vbe_state_save(struct uvesafb_par *par)
{
struct uvesafb_ktask *task;
u8 *state;
int err;
if (!par->vbe_state_size)
return NULL;
state = kmalloc(par->vbe_state_size, GFP_KERNEL);
if (!state)
return NULL;
task = uvesafb_prep();
if (!task) {
kfree(state);
return NULL;
}
task->t.regs.eax = 0x4f04;
task->t.regs.ecx = 0x000f;
task->t.regs.edx = 0x0001;
task->t.flags = TF_BUF_RET | TF_BUF_ESBX;
task->t.buf_len = par->vbe_state_size;
task->buf = state;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
printk(KERN_WARNING "uvesafb: VBE get state call "
"failed (eax=0x%x, err=%d)\n",
task->t.regs.eax, err);
kfree(state);
state = NULL;
}
uvesafb_free(task);
return state;
}
static void uvesafb_vbe_state_restore(struct uvesafb_par *par, u8 *state_buf)
{
struct uvesafb_ktask *task;
int err;
if (!state_buf)
return;
task = uvesafb_prep();
if (!task)
return;
task->t.regs.eax = 0x4f04;
task->t.regs.ecx = 0x000f;
task->t.regs.edx = 0x0002;
task->t.buf_len = par->vbe_state_size;
task->t.flags = TF_BUF_ESBX;
task->buf = state_buf;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f)
printk(KERN_WARNING "uvesafb: VBE state restore call "
"failed (eax=0x%x, err=%d)\n",
task->t.regs.eax, err);
uvesafb_free(task);
}
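/*
 * Fetch the VBE controller info block (function 0x4f00), reject
 * pre-2.0 BIOSes and print the OEM strings it advertises.
 */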
static int __devinit uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
struct uvesafb_par *par)
{
int err;
task->t.regs.eax = 0x4f00;
task->t.flags = TF_VBEIB;
task->t.buf_len = sizeof(struct vbe_ib);
task->buf = &par->vbe_ib;
strncpy(par->vbe_ib.vbe_signature, "VBE2", 4);
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
printk(KERN_ERR "uvesafb: Getting VBE info block failed "
"(eax=0x%x, err=%d)\n", (u32)task->t.regs.eax,
err);
return -EINVAL;
}
if (par->vbe_ib.vbe_version < 0x0200) {
printk(KERN_ERR "uvesafb: Sorry, pre-VBE 2.0 cards are "
"not supported.\n");
return -EINVAL;
}
if (!par->vbe_ib.mode_list_ptr) {
printk(KERN_ERR "uvesafb: Missing mode list!\n");
return -EINVAL;
}
printk(KERN_INFO "uvesafb: ");
/*
* Convert string pointers and the mode list pointer into
* usable addresses. Print informational messages about the
* video adapter and its vendor.
*/
if (par->vbe_ib.oem_vendor_name_ptr)
printk("%s, ",
((char *)task->buf) + par->vbe_ib.oem_vendor_name_ptr);
if (par->vbe_ib.oem_product_name_ptr)
printk("%s, ",
((char *)task->buf) + par->vbe_ib.oem_product_name_ptr);
if (par->vbe_ib.oem_product_rev_ptr)
printk("%s, ",
((char *)task->buf) + par->vbe_ib.oem_product_rev_ptr);
if (par->vbe_ib.oem_string_ptr)
printk("OEM: %s, ",
((char *)task->buf) + par->vbe_ib.oem_string_ptr);
printk("VBE v%d.%d\n", ((par->vbe_ib.vbe_version & 0xff00) >> 8),
par->vbe_ib.vbe_version & 0xff);
return 0;
}
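/*
 * Walk the BIOS mode list, fetch a mode info block (function 0x4f01)
 * for every entry and keep only color graphics modes with LFB support
 * and at least 8 bpp in par->vbe_modes.
 */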
static int __devinit uvesafb_vbe_getmodes(struct uvesafb_ktask *task,
struct uvesafb_par *par)
{
int off = 0, err;
u16 *mode;
par->vbe_modes_cnt = 0;
/* Count available modes. */
mode = (u16 *) (((u8 *)&par->vbe_ib) + par->vbe_ib.mode_list_ptr);
while (*mode != 0xffff) {
par->vbe_modes_cnt++;
mode++;
}
par->vbe_modes = kzalloc(sizeof(struct vbe_mode_ib) *
par->vbe_modes_cnt, GFP_KERNEL);
if (!par->vbe_modes)
return -ENOMEM;
/* Get info about all available modes. */
mode = (u16 *) (((u8 *)&par->vbe_ib) + par->vbe_ib.mode_list_ptr);
while (*mode != 0xffff) {
struct vbe_mode_ib *mib;
uvesafb_reset(task);
task->t.regs.eax = 0x4f01;
task->t.regs.ecx = (u32) *mode;
task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
task->t.buf_len = sizeof(struct vbe_mode_ib);
task->buf = par->vbe_modes + off;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
printk(KERN_WARNING "uvesafb: Getting mode info block "
"for mode 0x%x failed (eax=0x%x, err=%d)\n",
*mode, (u32)task->t.regs.eax, err);
mode++;
par->vbe_modes_cnt--;
continue;
}
mib = task->buf;
mib->mode_id = *mode;
/*
* We only want modes that are supported with the current
* hardware configuration, color, graphics and that have
* support for the LFB.
*/
if ((mib->mode_attr & VBE_MODE_MASK) == VBE_MODE_MASK &&
mib->bits_per_pixel >= 8)
off++;
else
par->vbe_modes_cnt--;
mode++;
mib->depth = mib->red_len + mib->green_len + mib->blue_len;
/*
* Handle 8bpp modes and modes with broken color component
* lengths.
*/
if (mib->depth == 0 || (mib->depth == 24 &&
mib->bits_per_pixel == 32))
mib->depth = mib->bits_per_pixel;
}
if (par->vbe_modes_cnt > 0)
return 0;
else
return -EINVAL;
}
/*
* The Protected Mode Interface is 32-bit x86 code, so we only run it on
* x86 and not x86_64.
*/
#ifdef CONFIG_X86_32
static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
struct uvesafb_par *par)
{
int i, err;
uvesafb_reset(task);
task->t.regs.eax = 0x4f0a;
task->t.regs.ebx = 0x0;
err = uvesafb_exec(task);
if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
par->pmi_setpal = par->ypan = 0;
} else {
par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+ task->t.regs.edi);
par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
printk(KERN_INFO "uvesafb: protected mode interface info at "
"%04x:%04x\n",
(u16)task->t.regs.es, (u16)task->t.regs.edi);
printk(KERN_INFO "uvesafb: pmi: set display start = %p, "
"set palette = %p\n", par->pmi_start,
par->pmi_pal);
if (par->pmi_base[3]) {
printk(KERN_INFO "uvesafb: pmi: ports = ");
for (i = par->pmi_base[3]/2;
par->pmi_base[i] != 0xffff; i++)
printk("%x ", par->pmi_base[i]);
printk("\n");
if (par->pmi_base[i] != 0xffff) {
printk(KERN_INFO "uvesafb: can't handle memory"
" requests, pmi disabled\n");
par->ypan = par->pmi_setpal = 0;
}
}
}
return 0;
}
#endif /* CONFIG_X86_32 */
/*
* Check whether a video mode is supported by the Video BIOS and is
* compatible with the monitor limits.
*/
static int __devinit uvesafb_is_valid_mode(struct fb_videomode *mode,
struct fb_info *info)
{
if (info->monspecs.gtf) {
fb_videomode_to_var(&info->var, mode);
if (fb_validate_mode(&info->var, info))
return 0;
}
if (uvesafb_vbe_find_mode(info->par, mode->xres, mode->yres, 8,
UVESAFB_EXACT_RES) == -1)
return 0;
return 1;
}
static int __devinit uvesafb_vbe_getedid(struct uvesafb_ktask *task,
struct fb_info *info)
{
struct uvesafb_par *par = info->par;
int err = 0;
if (noedid || par->vbe_ib.vbe_version < 0x0300)
return -EINVAL;
task->t.regs.eax = 0x4f15;
task->t.regs.ebx = 0;
task->t.regs.ecx = 0;
task->t.buf_len = 0;
task->t.flags = 0;
err = uvesafb_exec(task);
if ((task->t.regs.eax & 0xffff) != 0x004f || err)
return -EINVAL;
if ((task->t.regs.ebx & 0x3) == 3) {
printk(KERN_INFO "uvesafb: VBIOS/hardware supports both "
"DDC1 and DDC2 transfers\n");
} else if ((task->t.regs.ebx & 0x3) == 2) {
printk(KERN_INFO "uvesafb: VBIOS/hardware supports DDC2 "
"transfers\n");
} else if ((task->t.regs.ebx & 0x3) == 1) {
printk(KERN_INFO "uvesafb: VBIOS/hardware supports DDC1 "
"transfers\n");
} else {
printk(KERN_INFO "uvesafb: VBIOS/hardware doesn't support "
"DDC transfers\n");
return -EINVAL;
}
task->t.regs.eax = 0x4f15;
task->t.regs.ebx = 1;
task->t.regs.ecx = task->t.regs.edx = 0;
task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
task->t.buf_len = EDID_LENGTH;
task->buf = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!task->buf)
return -ENOMEM;
err = uvesafb_exec(task);
if ((task->t.regs.eax & 0xffff) == 0x004f && !err) {
fb_edid_to_monspecs(task->buf, &info->monspecs);
if (info->monspecs.vfmax && info->monspecs.hfmax) {
/*
* If the maximum pixel clock wasn't specified in
* the EDID block, set it to 300 MHz.
*/
if (info->monspecs.dclkmax == 0)
info->monspecs.dclkmax = 300 * 1000000;
info->monspecs.gtf = 1;
}
} else {
err = -EINVAL;
}
kfree(task->buf);
return err;
}
static void __devinit uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task,
struct fb_info *info)
{
struct uvesafb_par *par = info->par;
int i;
memset(&info->monspecs, 0, sizeof(info->monspecs));
/*
* If we don't get all necessary data from the EDID block,
* mark it as incompatible with the GTF and set nocrtc so
* that we always use the default BIOS refresh rate.
*/
if (uvesafb_vbe_getedid(task, info)) {
info->monspecs.gtf = 0;
par->nocrtc = 1;
}
/* Kernel command line overrides. */
if (maxclk)
info->monspecs.dclkmax = maxclk * 1000000;
if (maxvf)
info->monspecs.vfmax = maxvf;
if (maxhf)
info->monspecs.hfmax = maxhf * 1000;
/*
* In case DDC transfers are not supported, the user can provide
* monitor limits manually. Lower limits are set to "safe" values.
*/
if (info->monspecs.gtf == 0 && maxclk && maxvf && maxhf) {
info->monspecs.dclkmin = 0;
info->monspecs.vfmin = 60;
info->monspecs.hfmin = 29000;
info->monspecs.gtf = 1;
par->nocrtc = 0;
}
if (info->monspecs.gtf)
printk(KERN_INFO
"uvesafb: monitor limits: vf = %d Hz, hf = %d kHz, "
"clk = %d MHz\n", info->monspecs.vfmax,
(int)(info->monspecs.hfmax / 1000),
(int)(info->monspecs.dclkmax / 1000000));
else
printk(KERN_INFO "uvesafb: no monitor limits have been set, "
"default refresh rate will be used\n");
/* Add VBE modes to the modelist. */
for (i = 0; i < par->vbe_modes_cnt; i++) {
struct fb_var_screeninfo var;
struct vbe_mode_ib *mode;
struct fb_videomode vmode;
mode = &par->vbe_modes[i];
memset(&var, 0, sizeof(var));
var.xres = mode->x_res;
var.yres = mode->y_res;
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60, &var, info);
fb_var_to_videomode(&vmode, &var);
fb_add_videomode(&vmode, &info->modelist);
}
/* Add valid VESA modes to our modelist. */
for (i = 0; i < VESA_MODEDB_SIZE; i++) {
if (uvesafb_is_valid_mode((struct fb_videomode *)
&vesa_modes[i], info))
fb_add_videomode(&vesa_modes[i], &info->modelist);
}
for (i = 0; i < info->monspecs.modedb_len; i++) {
if (uvesafb_is_valid_mode(&info->monspecs.modedb[i], info))
fb_add_videomode(&info->monspecs.modedb[i],
&info->modelist);
}
return;
}
static void __devinit uvesafb_vbe_getstatesize(struct uvesafb_ktask *task,
struct uvesafb_par *par)
{
int err;
uvesafb_reset(task);
/*
* Get the VBE state buffer size. We want all available
* hardware state data (CL = 0x0f).
*/
task->t.regs.eax = 0x4f04;
task->t.regs.ecx = 0x000f;
task->t.regs.edx = 0x0000;
task->t.flags = 0;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
printk(KERN_WARNING "uvesafb: VBE state buffer size "
"cannot be determined (eax=0x%x, err=%d)\n",
task->t.regs.eax, err);
par->vbe_state_size = 0;
return;
}
par->vbe_state_size = 64 * (task->t.regs.ebx & 0xffff);
}
static int __devinit uvesafb_vbe_init(struct fb_info *info)
{
struct uvesafb_ktask *task = NULL;
struct uvesafb_par *par = info->par;
int err;
task = uvesafb_prep();
if (!task)
return -ENOMEM;
err = uvesafb_vbe_getinfo(task, par);
if (err)
goto out;
err = uvesafb_vbe_getmodes(task, par);
if (err)
goto out;
par->nocrtc = nocrtc;
#ifdef CONFIG_X86_32
par->pmi_setpal = pmi_setpal;
par->ypan = ypan;
if (par->pmi_setpal || par->ypan) {
if (__supported_pte_mask & _PAGE_NX) {
par->pmi_setpal = par->ypan = 0;
printk(KERN_WARNING "uvesafb: NX protection is active, "
"not using the PMI.\n");
} else {
uvesafb_vbe_getpmi(task, par);
}
}
#else
/* The protected mode interface is not available on non-x86. */
par->pmi_setpal = par->ypan = 0;
#endif
INIT_LIST_HEAD(&info->modelist);
uvesafb_vbe_getmonspecs(task, info);
uvesafb_vbe_getstatesize(task, par);
out: uvesafb_free(task);
return err;
}
static int __devinit uvesafb_vbe_init_mode(struct fb_info *info)
{
struct list_head *pos;
struct fb_modelist *modelist;
struct fb_videomode *mode;
struct uvesafb_par *par = info->par;
int i, modeid;
/* Has the user requested a specific VESA mode? */
if (vbemode) {
for (i = 0; i < par->vbe_modes_cnt; i++) {
if (par->vbe_modes[i].mode_id == vbemode) {
modeid = i;
uvesafb_setup_var(&info->var, info,
&par->vbe_modes[modeid]);
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60,
&info->var, info);
/*
* With pixclock set to 0, the default BIOS
* timings will be used in set_par().
*/
info->var.pixclock = 0;
goto gotmode;
}
}
printk(KERN_INFO "uvesafb: requested VBE mode 0x%x is "
"unavailable\n", vbemode);
vbemode = 0;
}
/* Count the modes in the modelist */
i = 0;
list_for_each(pos, &info->modelist)
i++;
/*
* Convert the modelist into a modedb so that we can use it with
* fb_find_mode().
*/
mode = kzalloc(i * sizeof(*mode), GFP_KERNEL);
if (mode) {
i = 0;
list_for_each(pos, &info->modelist) {
modelist = list_entry(pos, struct fb_modelist, list);
mode[i] = modelist->mode;
i++;
}
if (!mode_option)
mode_option = UVESAFB_DEFAULT_MODE;
i = fb_find_mode(&info->var, info, mode_option, mode, i,
NULL, 8);
kfree(mode);
}
/* fb_find_mode() failed */
if (i == 0) {
info->var.xres = 640;
info->var.yres = 480;
mode = (struct fb_videomode *)
fb_find_best_mode(&info->var, &info->modelist);
if (mode) {
fb_videomode_to_var(&info->var, mode);
} else {
modeid = par->vbe_modes[0].mode_id;
uvesafb_setup_var(&info->var, info,
&par->vbe_modes[modeid]);
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60,
&info->var, info);
goto gotmode;
}
}
/* Look for a matching VBE mode. */
modeid = uvesafb_vbe_find_mode(par, info->var.xres, info->var.yres,
info->var.bits_per_pixel, UVESAFB_EXACT_RES);
if (modeid == -1)
return -EINVAL;
uvesafb_setup_var(&info->var, info, &par->vbe_modes[modeid]);
gotmode:
/*
* If we are not VBE3.0+ compliant, we're done -- the BIOS will
* ignore our timings anyway.
*/
if (par->vbe_ib.vbe_version < 0x0300 || par->nocrtc)
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60,
&info->var, info);
return modeid;
}
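/*
 * Program the palette using the fastest path available: the VGA DAC
 * registers for VGA-compatible modes, the 32-bit PMI entry point on
 * x86-32, or VBE function 0x4f09 executed by v86d otherwise.
 */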
static int uvesafb_setpalette(struct uvesafb_pal_entry *entries, int count,
int start, struct fb_info *info)
{
struct uvesafb_ktask *task;
#ifdef CONFIG_X86
struct uvesafb_par *par = info->par;
int i = par->mode_idx;
#endif
int err = 0;
/*
* We support palette modifications for 8 bpp modes only, so
* there can never be more than 256 entries.
*/
if (start + count > 256)
return -EINVAL;
#ifdef CONFIG_X86
/* Use VGA registers if mode is VGA-compatible. */
if (i >= 0 && i < par->vbe_modes_cnt &&
par->vbe_modes[i].mode_attr & VBE_MODE_VGACOMPAT) {
for (i = 0; i < count; i++) {
outb_p(start + i, dac_reg);
outb_p(entries[i].red, dac_val);
outb_p(entries[i].green, dac_val);
outb_p(entries[i].blue, dac_val);
}
}
#ifdef CONFIG_X86_32
else if (par->pmi_setpal) {
__asm__ __volatile__(
"call *(%%esi)"
: /* no return value */
: "a" (0x4f09), /* EAX */
"b" (0), /* EBX */
"c" (count), /* ECX */
"d" (start), /* EDX */
"D" (entries), /* EDI */
"S" (&par->pmi_pal)); /* ESI */
}
#endif /* CONFIG_X86_32 */
else
#endif /* CONFIG_X86 */
{
task = uvesafb_prep();
if (!task)
return -ENOMEM;
task->t.regs.eax = 0x4f09;
task->t.regs.ebx = 0x0;
task->t.regs.ecx = count;
task->t.regs.edx = start;
task->t.flags = TF_BUF_ESDI;
task->t.buf_len = sizeof(struct uvesafb_pal_entry) * count;
task->buf = entries;
err = uvesafb_exec(task);
if ((task->t.regs.eax & 0xffff) != 0x004f)
err = 1;
uvesafb_free(task);
}
return err;
}
static int uvesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct uvesafb_pal_entry entry;
int shift = 16 - dac_width;
int err = 0;
if (regno >= info->cmap.len)
return -EINVAL;
if (info->var.bits_per_pixel == 8) {
entry.red = red >> shift;
entry.green = green >> shift;
entry.blue = blue >> shift;
entry.pad = 0;
err = uvesafb_setpalette(&entry, 1, regno, info);
} else if (regno < 16) {
switch (info->var.bits_per_pixel) {
case 16:
if (info->var.red.offset == 10) {
/* 1:5:5:5 */
((u32 *) (info->pseudo_palette))[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
} else {
/* 0:5:6:5 */
((u32 *) (info->pseudo_palette))[regno] =
((red & 0xf800) ) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
}
break;
case 24:
case 32:
red >>= 8;
green >>= 8;
blue >>= 8;
((u32 *)(info->pseudo_palette))[regno] =
(red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
break;
}
}
return err;
}
static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct uvesafb_pal_entry *entries;
int shift = 16 - dac_width;
int i, err = 0;
if (info->var.bits_per_pixel == 8) {
if (cmap->start + cmap->len > info->cmap.start +
info->cmap.len || cmap->start < info->cmap.start)
return -EINVAL;
entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL);
if (!entries)
return -ENOMEM;
for (i = 0; i < cmap->len; i++) {
entries[i].red = cmap->red[i] >> shift;
entries[i].green = cmap->green[i] >> shift;
entries[i].blue = cmap->blue[i] >> shift;
entries[i].pad = 0;
}
err = uvesafb_setpalette(entries, cmap->len, cmap->start, info);
kfree(entries);
} else {
/*
* For modes with bpp > 8, we only set the pseudo palette in
* the fb_info struct. We rely on uvesafb_setcolreg to do all
* sanity checking.
*/
for (i = 0; i < cmap->len; i++) {
err |= uvesafb_setcolreg(cmap->start + i, cmap->red[i],
cmap->green[i], cmap->blue[i],
0, info);
}
}
return err;
}
static int uvesafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
#ifdef CONFIG_X86_32
int offset;
struct uvesafb_par *par = info->par;
offset = (var->yoffset * info->fix.line_length + var->xoffset) / 4;
/*
* It turns out it's not the best idea to do panning via vm86,
* so we only allow it if we have a PMI.
*/
if (par->pmi_start) {
__asm__ __volatile__(
"call *(%%edi)"
: /* no return value */
: "a" (0x4f07), /* EAX */
"b" (0), /* EBX */
"c" (offset), /* ECX */
"d" (offset >> 16), /* EDX */
"D" (&par->pmi_start)); /* EDI */
}
#endif
return 0;
}
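/*
 * Blank/unblank the display, either by programming the VGA sequencer
 * and CRTC registers directly (VGA-compatible adapters) or via VBE
 * function 0x4f10.
 */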
static int uvesafb_blank(int blank, struct fb_info *info)
{
struct uvesafb_ktask *task;
int err = 1;
#ifdef CONFIG_X86
struct uvesafb_par *par = info->par;
if (par->vbe_ib.capabilities & VBE_CAP_VGACOMPAT) {
int loop = 10000;
u8 seq = 0, crtc17 = 0;
if (blank == FB_BLANK_POWERDOWN) {
seq = 0x20;
crtc17 = 0x00;
err = 0;
} else {
seq = 0x00;
crtc17 = 0x80;
err = (blank == FB_BLANK_UNBLANK) ? 0 : -EINVAL;
}
vga_wseq(NULL, 0x00, 0x01);
seq |= vga_rseq(NULL, 0x01) & ~0x20;
vga_wseq(NULL, 0x00, seq);
crtc17 |= vga_rcrt(NULL, 0x17) & ~0x80;
while (loop--);
vga_wcrt(NULL, 0x17, crtc17);
vga_wseq(NULL, 0x00, 0x03);
} else
#endif /* CONFIG_X86 */
{
task = uvesafb_prep();
if (!task)
return -ENOMEM;
task->t.regs.eax = 0x4f10;
switch (blank) {
case FB_BLANK_UNBLANK:
task->t.regs.ebx = 0x0001;
break;
case FB_BLANK_NORMAL:
task->t.regs.ebx = 0x0101; /* standby */
break;
case FB_BLANK_POWERDOWN:
task->t.regs.ebx = 0x0401; /* powerdown */
break;
default:
goto out;
}
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f)
err = 1;
out: uvesafb_free(task);
}
return err;
}
static int uvesafb_open(struct fb_info *info, int user)
{
struct uvesafb_par *par = info->par;
int cnt = atomic_read(&par->ref_count);
if (!cnt && par->vbe_state_size)
par->vbe_state_orig = uvesafb_vbe_state_save(par);
atomic_inc(&par->ref_count);
return 0;
}
static int uvesafb_release(struct fb_info *info, int user)
{
struct uvesafb_ktask *task = NULL;
struct uvesafb_par *par = info->par;
int cnt = atomic_read(&par->ref_count);
if (!cnt)
return -EINVAL;
if (cnt != 1)
goto out;
task = uvesafb_prep();
if (!task)
goto out;
/* First, try to set the standard 80x25 text mode. */
task->t.regs.eax = 0x0003;
uvesafb_exec(task);
/*
* Now try to restore whatever hardware state we might have
* saved when the fb device was first opened.
*/
uvesafb_vbe_state_restore(par, par->vbe_state_orig);
out:
atomic_dec(&par->ref_count);
if (task)
uvesafb_free(task);
return 0;
}
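/*
 * Switch to the VBE mode matching info->var. On VBE 3.0+ our own CRTC
 * timings are passed along; if that mode set fails, it is retried with
 * the default BIOS timings. For modes of 8 bpp or less the DAC is
 * widened to 8 bits when the BIOS allows it.
 */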
static int uvesafb_set_par(struct fb_info *info)
{
struct uvesafb_par *par = info->par;
struct uvesafb_ktask *task = NULL;
struct vbe_crtc_ib *crtc = NULL;
struct vbe_mode_ib *mode = NULL;
int i, err = 0, depth = info->var.bits_per_pixel;
if (depth > 8 && depth != 32)
depth = info->var.red.length + info->var.green.length +
info->var.blue.length;
i = uvesafb_vbe_find_mode(par, info->var.xres, info->var.yres, depth,
UVESAFB_EXACT_RES | UVESAFB_EXACT_DEPTH);
if (i >= 0)
mode = &par->vbe_modes[i];
else
return -EINVAL;
task = uvesafb_prep();
if (!task)
return -ENOMEM;
setmode:
task->t.regs.eax = 0x4f02;
task->t.regs.ebx = mode->mode_id | 0x4000; /* use LFB */
if (par->vbe_ib.vbe_version >= 0x0300 && !par->nocrtc &&
info->var.pixclock != 0) {
task->t.regs.ebx |= 0x0800; /* use CRTC data */
task->t.flags = TF_BUF_ESDI;
crtc = kzalloc(sizeof(struct vbe_crtc_ib), GFP_KERNEL);
if (!crtc) {
err = -ENOMEM;
goto out;
}
crtc->horiz_start = info->var.xres + info->var.right_margin;
crtc->horiz_end = crtc->horiz_start + info->var.hsync_len;
crtc->horiz_total = crtc->horiz_end + info->var.left_margin;
crtc->vert_start = info->var.yres + info->var.lower_margin;
crtc->vert_end = crtc->vert_start + info->var.vsync_len;
crtc->vert_total = crtc->vert_end + info->var.upper_margin;
crtc->pixel_clock = PICOS2KHZ(info->var.pixclock) * 1000;
crtc->refresh_rate = (u16)(100 * (crtc->pixel_clock /
(crtc->vert_total * crtc->horiz_total)));
if (info->var.vmode & FB_VMODE_DOUBLE)
crtc->flags |= 0x1;
if (info->var.vmode & FB_VMODE_INTERLACED)
crtc->flags |= 0x2;
if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
crtc->flags |= 0x4;
if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
crtc->flags |= 0x8;
memcpy(&par->crtc, crtc, sizeof(*crtc));
} else {
memset(&par->crtc, 0, sizeof(*crtc));
}
task->t.buf_len = sizeof(struct vbe_crtc_ib);
task->buf = &par->crtc;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
/*
* The mode switch might have failed because we tried to
* use our own timings. Try again with the default timings.
*/
if (crtc != NULL) {
printk(KERN_WARNING "uvesafb: mode switch failed "
"(eax=0x%x, err=%d). Trying again with "
"default timings.\n", task->t.regs.eax, err);
uvesafb_reset(task);
kfree(crtc);
crtc = NULL;
info->var.pixclock = 0;
goto setmode;
} else {
printk(KERN_ERR "uvesafb: mode switch failed (eax="
"0x%x, err=%d)\n", task->t.regs.eax, err);
err = -EINVAL;
goto out;
}
}
par->mode_idx = i;
/* For 8bpp modes, always try to set the DAC to 8 bits. */
if (par->vbe_ib.capabilities & VBE_CAP_CAN_SWITCH_DAC &&
mode->bits_per_pixel <= 8) {
uvesafb_reset(task);
task->t.regs.eax = 0x4f08;
task->t.regs.ebx = 0x0800;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f ||
((task->t.regs.ebx & 0xff00) >> 8) != 8) {
dac_width = 6;
} else {
dac_width = 8;
}
}
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
info->fix.line_length = mode->bytes_per_scan_line;
out: if (crtc != NULL)
kfree(crtc);
uvesafb_free(task);
return err;
}
static void uvesafb_check_limits(struct fb_var_screeninfo *var,
struct fb_info *info)
{
const struct fb_videomode *mode;
struct uvesafb_par *par = info->par;
/*
* If pixclock is set to 0, then we're using default BIOS timings
* and thus don't have to perform any checks here.
*/
if (!var->pixclock)
return;
if (par->vbe_ib.vbe_version < 0x0300) {
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60, var, info);
return;
}
if (!fb_validate_mode(var, info))
return;
mode = fb_find_best_mode(var, &info->modelist);
if (mode) {
if (mode->xres == var->xres && mode->yres == var->yres &&
!(mode->vmode & (FB_VMODE_INTERLACED | FB_VMODE_DOUBLE))) {
fb_videomode_to_var(var, mode);
return;
}
}
if (info->monspecs.gtf && !fb_get_mode(FB_MAXTIMINGS, 0, var, info))
return;
/* Use default refresh rate */
var->pixclock = 0;
}
static int uvesafb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct uvesafb_par *par = info->par;
struct vbe_mode_ib *mode = NULL;
int match = -1;
int depth = var->red.length + var->green.length + var->blue.length;
/*
* Various apps will use bits_per_pixel to set the color depth,
* which is theoretically incorrect, but which we'll try to handle
* here.
*/
if (depth == 0 || abs(depth - var->bits_per_pixel) >= 8)
depth = var->bits_per_pixel;
match = uvesafb_vbe_find_mode(par, var->xres, var->yres, depth,
UVESAFB_EXACT_RES);
if (match == -1)
return -EINVAL;
mode = &par->vbe_modes[match];
uvesafb_setup_var(var, info, mode);
/*
* Check whether we have remapped enough memory for this mode.
* We might be called at an early stage, when we haven't remapped
* any memory yet, in which case we simply skip the check.
*/
if (var->yres * mode->bytes_per_scan_line > info->fix.smem_len
&& info->fix.smem_len)
return -EINVAL;
if ((var->vmode & FB_VMODE_DOUBLE) &&
!(par->vbe_modes[match].mode_attr & 0x100))
var->vmode &= ~FB_VMODE_DOUBLE;
if ((var->vmode & FB_VMODE_INTERLACED) &&
!(par->vbe_modes[match].mode_attr & 0x200))
var->vmode &= ~FB_VMODE_INTERLACED;
uvesafb_check_limits(var, info);
var->xres_virtual = var->xres;
var->yres_virtual = (par->ypan) ?
info->fix.smem_len / mode->bytes_per_scan_line :
var->yres;
return 0;
}
static struct fb_ops uvesafb_ops = {
.owner = THIS_MODULE,
.fb_open = uvesafb_open,
.fb_release = uvesafb_release,
.fb_setcolreg = uvesafb_setcolreg,
.fb_setcmap = uvesafb_setcmap,
.fb_pan_display = uvesafb_pan_display,
.fb_blank = uvesafb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_check_var = uvesafb_check_var,
.fb_set_par = uvesafb_set_par,
};
static void __devinit uvesafb_init_info(struct fb_info *info,
struct vbe_mode_ib *mode)
{
unsigned int size_vmode;
unsigned int size_remap;
unsigned int size_total;
struct uvesafb_par *par = info->par;
int i, h;
info->pseudo_palette = ((u8 *)info->par + sizeof(struct uvesafb_par));
info->fix = uvesafb_fix;
info->fix.ypanstep = par->ypan ? 1 : 0;
info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
/* Disable blanking if the user requested so. */
if (!blank)
info->fbops->fb_blank = NULL;
/*
* Find out how much IO memory is required for the mode with
* the highest resolution.
*/
size_remap = 0;
for (i = 0; i < par->vbe_modes_cnt; i++) {
h = par->vbe_modes[i].bytes_per_scan_line *
par->vbe_modes[i].y_res;
if (h > size_remap)
size_remap = h;
}
size_remap *= 2;
/*
* size_vmode -- that is the amount of memory needed for the
* used video mode, i.e. the minimum amount of
* memory we need.
*/
if (mode != NULL) {
size_vmode = info->var.yres * mode->bytes_per_scan_line;
} else {
size_vmode = info->var.yres * info->var.xres *
((info->var.bits_per_pixel + 7) >> 3);
}
/*
* size_total -- all video memory we have. Used for mtrr
* entries, resource allocation and bounds
* checking.
*/
size_total = par->vbe_ib.total_memory * 65536;
if (vram_total)
size_total = vram_total * 1024 * 1024;
if (size_total < size_vmode)
size_total = size_vmode;
/*
* size_remap -- the amount of video memory we are going to
* use for vesafb. With modern cards it is no
* option to simply use size_total as that
* wastes plenty of kernel address space.
*/
if (vram_remap)
size_remap = vram_remap * 1024 * 1024;
if (size_remap < size_vmode)
size_remap = size_vmode;
if (size_remap > size_total)
size_remap = size_total;
info->fix.smem_len = size_remap;
info->fix.smem_start = mode->phys_base_ptr;
/*
* We have to set yres_virtual here because when setup_var() was
* called, smem_len wasn't defined yet.
*/
info->var.yres_virtual = info->fix.smem_len /
mode->bytes_per_scan_line;
if (par->ypan && info->var.yres_virtual > info->var.yres) {
printk(KERN_INFO "uvesafb: scrolling: %s "
"using protected mode interface, "
"yres_virtual=%d\n",
(par->ypan > 1) ? "ywrap" : "ypan",
info->var.yres_virtual);
} else {
printk(KERN_INFO "uvesafb: scrolling: redraw\n");
info->var.yres_virtual = info->var.yres;
par->ypan = 0;
}
info->flags = FBINFO_FLAG_DEFAULT |
(par->ypan ? FBINFO_HWACCEL_YPAN : 0);
if (!par->ypan)
info->fbops->fb_pan_display = NULL;
}
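/*
 * Cover the framebuffer with an MTRR of the type selected by the
 * 'mtrr' parameter, halving the (power-of-two) size until mtrr_add()
 * accepts it.
 */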
static void __devinit uvesafb_init_mtrr(struct fb_info *info)
{
#ifdef CONFIG_MTRR
if (mtrr && !(info->fix.smem_start & (PAGE_SIZE - 1))) {
int temp_size = info->fix.smem_len;
unsigned int type = 0;
switch (mtrr) {
case 1:
type = MTRR_TYPE_UNCACHABLE;
break;
case 2:
type = MTRR_TYPE_WRBACK;
break;
case 3:
type = MTRR_TYPE_WRCOMB;
break;
case 4:
type = MTRR_TYPE_WRTHROUGH;
break;
default:
type = 0;
break;
}
if (type) {
int rc;
/* Find the largest power-of-two */
temp_size = roundup_pow_of_two(temp_size);
/* Try and find a power of two to add */
do {
rc = mtrr_add(info->fix.smem_start,
temp_size, type, 1);
temp_size >>= 1;
} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
}
}
#endif /* CONFIG_MTRR */
}
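/* Map the framebuffer with a caching type that matches the MTRR setup. */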
static void __devinit uvesafb_ioremap(struct fb_info *info)
{
#ifdef CONFIG_X86
switch (mtrr) {
case 1: /* uncachable */
info->screen_base = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
break;
case 2: /* write-back */
info->screen_base = ioremap_cache(info->fix.smem_start, info->fix.smem_len);
break;
case 3: /* write-combining */
info->screen_base = ioremap_wc(info->fix.smem_start, info->fix.smem_len);
break;
case 4: /* write-through */
default:
info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
break;
}
#else
info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
#endif /* CONFIG_X86 */
}
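/* sysfs attributes exposing VBE and OEM information for the platform device. */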
static ssize_t uvesafb_show_vbe_ver(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
return snprintf(buf, PAGE_SIZE, "%.4x\n", par->vbe_ib.vbe_version);
}
static DEVICE_ATTR(vbe_version, S_IRUGO, uvesafb_show_vbe_ver, NULL);
static ssize_t uvesafb_show_vbe_modes(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
int ret = 0, i;
for (i = 0; i < par->vbe_modes_cnt && ret < PAGE_SIZE; i++) {
ret += snprintf(buf + ret, PAGE_SIZE - ret,
"%dx%d-%d, 0x%.4x\n",
par->vbe_modes[i].x_res, par->vbe_modes[i].y_res,
par->vbe_modes[i].depth, par->vbe_modes[i].mode_id);
}
return ret;
}
static DEVICE_ATTR(vbe_modes, S_IRUGO, uvesafb_show_vbe_modes, NULL);
static ssize_t uvesafb_show_vendor(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_vendor_name_ptr)
return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_vendor_name_ptr);
else
return 0;
}
static DEVICE_ATTR(oem_vendor, S_IRUGO, uvesafb_show_vendor, NULL);
static ssize_t uvesafb_show_product_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_name_ptr)
return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_product_name_ptr);
else
return 0;
}
static DEVICE_ATTR(oem_product_name, S_IRUGO, uvesafb_show_product_name, NULL);
static ssize_t uvesafb_show_product_rev(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_rev_ptr)
return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_product_rev_ptr);
else
return 0;
}
static DEVICE_ATTR(oem_product_rev, S_IRUGO, uvesafb_show_product_rev, NULL);
static ssize_t uvesafb_show_oem_string(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_string_ptr)
return snprintf(buf, PAGE_SIZE, "%s\n",
(char *)(&par->vbe_ib) + par->vbe_ib.oem_string_ptr);
else
return 0;
}
static DEVICE_ATTR(oem_string, S_IRUGO, uvesafb_show_oem_string, NULL);
static ssize_t uvesafb_show_nocrtc(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
return snprintf(buf, PAGE_SIZE, "%d\n", par->nocrtc);
}
static ssize_t uvesafb_store_nocrtc(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (count > 0) {
if (buf[0] == '0')
par->nocrtc = 0;
else
par->nocrtc = 1;
}
return count;
}
static DEVICE_ATTR(nocrtc, S_IRUGO | S_IWUSR, uvesafb_show_nocrtc,
uvesafb_store_nocrtc);
static struct attribute *uvesafb_dev_attrs[] = {
&dev_attr_vbe_version.attr,
&dev_attr_vbe_modes.attr,
&dev_attr_oem_vendor.attr,
&dev_attr_oem_product_name.attr,
&dev_attr_oem_product_rev.attr,
&dev_attr_oem_string.attr,
&dev_attr_nocrtc.attr,
NULL,
};
static struct attribute_group uvesafb_dev_attgrp = {
.name = NULL,
.attrs = uvesafb_dev_attrs,
};
static int __devinit uvesafb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct vbe_mode_ib *mode = NULL;
struct uvesafb_par *par;
int err = 0, i;
info = framebuffer_alloc(sizeof(*par) + sizeof(u32) * 256, &dev->dev);
if (!info)
return -ENOMEM;
par = info->par;
err = uvesafb_vbe_init(info);
if (err) {
printk(KERN_ERR "uvesafb: vbe_init() failed with %d\n", err);
goto out;
}
info->fbops = &uvesafb_ops;
i = uvesafb_vbe_init_mode(info);
if (i < 0) {
err = -EINVAL;
goto out;
} else {
mode = &par->vbe_modes[i];
}
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENXIO;
goto out;
}
uvesafb_init_info(info, mode);
if (!request_region(0x3c0, 32, "uvesafb")) {
printk(KERN_ERR "uvesafb: request region 0x3c0-0x3e0 failed\n");
err = -EIO;
goto out_mode;
}
if (!request_mem_region(info->fix.smem_start, info->fix.smem_len,
"uvesafb")) {
printk(KERN_ERR "uvesafb: cannot reserve video memory at "
"0x%lx\n", info->fix.smem_start);
err = -EIO;
goto out_reg;
}
uvesafb_init_mtrr(info);
uvesafb_ioremap(info);
if (!info->screen_base) {
printk(KERN_ERR
"uvesafb: abort, cannot ioremap 0x%x bytes of video "
"memory at 0x%lx\n",
info->fix.smem_len, info->fix.smem_start);
err = -EIO;
goto out_mem;
}
platform_set_drvdata(dev, info);
if (register_framebuffer(info) < 0) {
printk(KERN_ERR
"uvesafb: failed to register framebuffer device\n");
err = -EINVAL;
goto out_unmap;
}
printk(KERN_INFO "uvesafb: framebuffer at 0x%lx, mapped to 0x%p, "
"using %dk, total %dk\n", info->fix.smem_start,
info->screen_base, info->fix.smem_len/1024,
par->vbe_ib.total_memory * 64);
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
info->fix.id);
err = sysfs_create_group(&dev->dev.kobj, &uvesafb_dev_attgrp);
if (err != 0)
printk(KERN_WARNING "fb%d: failed to register attributes\n",
info->node);
return 0;
out_unmap:
iounmap(info->screen_base);
out_mem:
release_mem_region(info->fix.smem_start, info->fix.smem_len);
out_reg:
release_region(0x3c0, 32);
out_mode:
if (!list_empty(&info->modelist))
fb_destroy_modelist(&info->modelist);
fb_destroy_modedb(info->monspecs.modedb);
fb_dealloc_cmap(&info->cmap);
out:
if (par->vbe_modes)
kfree(par->vbe_modes);
framebuffer_release(info);
return err;
}
static int uvesafb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
if (info) {
struct uvesafb_par *par = info->par;
sysfs_remove_group(&dev->dev.kobj, &uvesafb_dev_attgrp);
unregister_framebuffer(info);
release_region(0x3c0, 32);
iounmap(info->screen_base);
release_mem_region(info->fix.smem_start, info->fix.smem_len);
fb_destroy_modedb(info->monspecs.modedb);
fb_dealloc_cmap(&info->cmap);
if (par) {
if (par->vbe_modes)
kfree(par->vbe_modes);
if (par->vbe_state_orig)
kfree(par->vbe_state_orig);
if (par->vbe_state_saved)
kfree(par->vbe_state_saved);
}
framebuffer_release(info);
}
return 0;
}
static struct platform_driver uvesafb_driver = {
.probe = uvesafb_probe,
.remove = uvesafb_remove,
.driver = {
.name = "uvesafb",
},
};
static struct platform_device *uvesafb_device;
#ifndef MODULE
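/*
 * Parse options given on the kernel command line.  A typical invocation
 * (illustrative only; see Documentation/fb/uvesafb.txt for the full list)
 * could look like:
 *
 *   video=uvesafb:1024x768-32,mtrr:3,ywrap
 */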
static int __devinit uvesafb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
if (!strcmp(this_opt, "redraw"))
ypan = 0;
else if (!strcmp(this_opt, "ypan"))
ypan = 1;
else if (!strcmp(this_opt, "ywrap"))
ypan = 2;
else if (!strcmp(this_opt, "vgapal"))
pmi_setpal = 0;
else if (!strcmp(this_opt, "pmipal"))
pmi_setpal = 1;
else if (!strncmp(this_opt, "mtrr:", 5))
mtrr = simple_strtoul(this_opt+5, NULL, 0);
else if (!strcmp(this_opt, "nomtrr"))
mtrr = 0;
else if (!strcmp(this_opt, "nocrtc"))
nocrtc = 1;
else if (!strcmp(this_opt, "noedid"))
noedid = 1;
else if (!strcmp(this_opt, "noblank"))
blank = 0;
else if (!strncmp(this_opt, "vtotal:", 7))
vram_total = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "vremap:", 7))
vram_remap = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "maxhf:", 6))
maxhf = simple_strtoul(this_opt + 6, NULL, 0);
else if (!strncmp(this_opt, "maxvf:", 6))
maxvf = simple_strtoul(this_opt + 6, NULL, 0);
else if (!strncmp(this_opt, "maxclk:", 7))
maxclk = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "vbemode:", 8))
vbemode = simple_strtoul(this_opt + 8, NULL, 0);
else if (this_opt[0] >= '0' && this_opt[0] <= '9') {
mode_option = this_opt;
} else {
printk(KERN_WARNING
"uvesafb: unrecognized option %s\n", this_opt);
}
}
return 0;
}
#endif /* !MODULE */
static ssize_t show_v86d(struct device_driver *dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", v86d_path);
}
static ssize_t store_v86d(struct device_driver *dev, const char *buf,
size_t count)
{
strncpy(v86d_path, buf, PATH_MAX);
return count;
}
static DRIVER_ATTR(v86d, S_IRUGO | S_IWUSR, show_v86d, store_v86d);
static int __devinit uvesafb_init(void)
{
int err;
#ifndef MODULE
char *option = NULL;
if (fb_get_options("uvesafb", &option))
return -ENODEV;
uvesafb_setup(option);
#endif
err = cn_add_callback(&uvesafb_cn_id, "uvesafb", uvesafb_cn_callback);
if (err)
return err;
err = platform_driver_register(&uvesafb_driver);
if (!err) {
uvesafb_device = platform_device_alloc("uvesafb", 0);
if (uvesafb_device)
err = platform_device_add(uvesafb_device);
else
err = -ENOMEM;
if (err) {
platform_device_put(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
cn_del_callback(&uvesafb_cn_id);
return err;
}
err = driver_create_file(&uvesafb_driver.driver,
&driver_attr_v86d);
if (err) {
printk(KERN_WARNING "uvesafb: failed to register "
"attributes\n");
err = 0;
}
}
return err;
}
module_init(uvesafb_init);
static void __devexit uvesafb_exit(void)
{
struct uvesafb_ktask *task;
if (v86d_started) {
task = uvesafb_prep();
if (task) {
task->t.flags = TF_EXIT;
uvesafb_exec(task);
uvesafb_free(task);
}
}
cn_del_callback(&uvesafb_cn_id);
driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
platform_device_unregister(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
}
module_exit(uvesafb_exit);
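/*
 * The "scroll" module parameter accepts the strings "redraw", "ypan" and
 * "ywrap" and maps them onto the integer ypan setting through the custom
 * kernel_param_ops defined below.
 */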
static int param_set_scroll(const char *val, const struct kernel_param *kp)
{
ypan = 0;
if (!strcmp(val, "redraw"))
ypan = 0;
else if (!strcmp(val, "ypan"))
ypan = 1;
else if (!strcmp(val, "ywrap"))
ypan = 2;
else
return -EINVAL;
return 0;
}
static struct kernel_param_ops param_ops_scroll = {
.set = param_set_scroll,
};
#define param_check_scroll(name, p) __param_check(name, p, void)
module_param_named(scroll, ypan, scroll, 0);
MODULE_PARM_DESC(scroll,
"Scrolling mode, set to 'redraw', 'ypan', or 'ywrap'");
module_param_named(vgapal, pmi_setpal, invbool, 0);
MODULE_PARM_DESC(vgapal, "Set palette using VGA registers");
module_param_named(pmipal, pmi_setpal, bool, 0);
MODULE_PARM_DESC(pmipal, "Set palette using PMI calls");
module_param(mtrr, uint, 0);
MODULE_PARM_DESC(mtrr,
"Memory Type Range Registers setting. Use 0 to disable.");
module_param(blank, bool, 0);
MODULE_PARM_DESC(blank, "Enable hardware blanking");
module_param(nocrtc, bool, 0);
MODULE_PARM_DESC(nocrtc, "Ignore CRTC timings when setting modes");
module_param(noedid, bool, 0);
MODULE_PARM_DESC(noedid,
"Ignore EDID-provided monitor limits when setting modes");
module_param(vram_remap, uint, 0);
MODULE_PARM_DESC(vram_remap, "Set amount of video memory to be used [MiB]");
module_param(vram_total, uint, 0);
MODULE_PARM_DESC(vram_total, "Set total amount of video memory [MiB]");
module_param(maxclk, ushort, 0);
MODULE_PARM_DESC(maxclk, "Maximum pixelclock [MHz], overrides EDID data");
module_param(maxhf, ushort, 0);
MODULE_PARM_DESC(maxhf,
"Maximum horizontal frequency [kHz], overrides EDID data");
module_param(maxvf, ushort, 0);
MODULE_PARM_DESC(maxvf,
"Maximum vertical frequency [Hz], overrides EDID data");
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option,
"Specify initial video mode as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
module_param(vbemode, ushort, 0);
MODULE_PARM_DESC(vbemode,
"VBE mode number to set, overrides the 'mode' option");
module_param_string(v86d, v86d_path, PATH_MAX, 0660);
MODULE_PARM_DESC(v86d, "Path to the v86d userspace helper.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Januszewski <spock@gentoo.org>");
MODULE_DESCRIPTION("Framebuffer driver for VBE2.0+ compliant graphics boards");
| manveru0/FeaCore_Phoenix_S3 | drivers/video/uvesafb.c | C | gpl-2.0 | 52,031 |
/* Copyright Statement:
*
* This software/firmware and related documentation ("MediaTek Software") are
* protected under relevant copyright laws. The information contained herein
* is confidential and proprietary to MediaTek Inc. and/or its licensors.
* Without the prior written permission of MediaTek inc. and/or its licensors,
* any reproduction, modification, use or disclosure of MediaTek Software,
* and information contained herein, in whole or in part, shall be strictly prohibited.
*
* MediaTek Inc. (C) 2010. All rights reserved.
*
* BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
* THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
* CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
* SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
* STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
* CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
*
* The following software/firmware and/or related documentation ("MediaTek Software")
* have been modified by MediaTek Inc. All revisions are subject to any receiver's
* applicable license agreements with MediaTek Inc.
*/
/*
* Copyright (C) 2007-2009 Freescale Semiconductor, Inc.
* Copyright (C) 2008-2009 MontaVista Software, Inc.
*
* Authors: Tony Li <tony.li@freescale.com>
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <pci.h>
#include <mpc83xx.h>
#include <asm/io.h>
DECLARE_GLOBAL_DATA_PTR;
#define PCIE_MAX_BUSES 2
#ifdef CONFIG_83XX_GENERIC_PCIE_REGISTER_HOSES
static int mpc83xx_pcie_remap_cfg(struct pci_controller *hose, pci_dev_t dev)
{
int bus = PCI_BUS(dev) - hose->first_busno;
immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
pex83xx_t *pex = &immr->pciexp[bus];
struct pex_outbound_window *out_win = &pex->bridge.pex_outbound_win[0];
u8 devfn = PCI_DEV(dev) << 3 | PCI_FUNC(dev);
u32 dev_base = bus << 24 | devfn << 16;
if (hose->indirect_type == INDIRECT_TYPE_NO_PCIE_LINK)
return -1;
/*
* Workaround for the HW bug: for Type 0 configure transactions the
* PCI-E controller does not check the device number bits and just
* assumes that the device number bits are 0.
*/
if (devfn & 0xf8)
return -1;
out_le32(&out_win->tarl, dev_base);
return 0;
}
#define cfg_read(val, addr, type, op) \
do { *val = op((type)(addr)); } while (0)
#define cfg_write(val, addr, type, op) \
do { op((type *)(addr), (val)); } while (0)
#define cfg_read_err(val) do { *val = -1; } while (0)
#define cfg_write_err(val) do { } while (0)
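/*
 * PCIE_OP() expands into the byte/word/dword configuration-space accessors
 * (pcie_read_config_* and pcie_write_config_*) that are later hooked into
 * the hose via pci_set_ops().
 */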
#define PCIE_OP(rw, size, type, op) \
static int pcie_##rw##_config_##size(struct pci_controller *hose, \
pci_dev_t dev, int offset, \
type val) \
{ \
int ret; \
\
ret = mpc83xx_pcie_remap_cfg(hose, dev); \
if (ret) { \
cfg_##rw##_err(val); \
return ret; \
} \
cfg_##rw(val, (void *)hose->cfg_addr + offset, type, op); \
return 0; \
}
PCIE_OP(read, byte, u8 *, in_8)
PCIE_OP(read, word, u16 *, in_le16)
PCIE_OP(read, dword, u32 *, in_le32)
PCIE_OP(write, byte, u8, out_8)
PCIE_OP(write, word, u16, out_le16)
PCIE_OP(write, dword, u32, out_le32)
static void mpc83xx_pcie_register_hose(int bus, struct pci_region *reg,
u8 link)
{
extern void disable_addr_trans(void); /* start.S */
static struct pci_controller pcie_hose[PCIE_MAX_BUSES];
struct pci_controller *hose = &pcie_hose[bus];
int i;
/*
* There are no spare BATs to remap all PCI-E windows for U-Boot, so
	 * disable translations. In general, this is not a great solution, and
* that's why we don't register PCI-E hoses by default.
*/
disable_addr_trans();
for (i = 0; i < 2; i++, reg++) {
if (reg->size == 0)
break;
hose->regions[i] = *reg;
hose->region_count++;
}
i = hose->region_count++;
hose->regions[i].bus_start = 0;
hose->regions[i].phys_start = 0;
hose->regions[i].size = gd->ram_size;
hose->regions[i].flags = PCI_REGION_MEM | PCI_REGION_SYS_MEMORY;
i = hose->region_count++;
hose->regions[i].bus_start = CONFIG_SYS_IMMR;
hose->regions[i].phys_start = CONFIG_SYS_IMMR;
hose->regions[i].size = 0x100000;
hose->regions[i].flags = PCI_REGION_MEM | PCI_REGION_SYS_MEMORY;
hose->first_busno = pci_last_busno() + 1;
hose->last_busno = 0xff;
if (bus == 0)
hose->cfg_addr = (unsigned int *)CONFIG_SYS_PCIE1_CFG_BASE;
else
hose->cfg_addr = (unsigned int *)CONFIG_SYS_PCIE2_CFG_BASE;
pci_set_ops(hose,
pcie_read_config_byte,
pcie_read_config_word,
pcie_read_config_dword,
pcie_write_config_byte,
pcie_write_config_word,
pcie_write_config_dword);
if (!link)
hose->indirect_type = INDIRECT_TYPE_NO_PCIE_LINK;
pci_register_hose(hose);
#ifdef CONFIG_PCI_SCAN_SHOW
printf("PCI: Bus Dev VenId DevId Class Int\n");
#endif
/*
* Hose scan.
*/
hose->last_busno = pci_hose_scan(hose);
}
#else
static void mpc83xx_pcie_register_hose(int bus, struct pci_region *reg,
u8 link) {}
#endif /* CONFIG_83XX_GENERIC_PCIE_REGISTER_HOSES */
static void mpc83xx_pcie_init_bus(int bus, struct pci_region *reg)
{
immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
pex83xx_t *pex = &immr->pciexp[bus];
struct pex_outbound_window *out_win;
struct pex_inbound_window *in_win;
void *hose_cfg_base;
unsigned int ram_sz;
unsigned int barl;
unsigned int tar;
u16 reg16;
int i;
/* Enable pex csb bridge inbound & outbound transactions */
out_le32(&pex->bridge.pex_csb_ctrl,
in_le32(&pex->bridge.pex_csb_ctrl) | PEX_CSB_CTRL_OBPIOE |
PEX_CSB_CTRL_IBPIOE);
/* Enable bridge outbound */
out_le32(&pex->bridge.pex_csb_obctrl, PEX_CSB_OBCTRL_PIOE |
PEX_CSB_OBCTRL_MEMWE | PEX_CSB_OBCTRL_IOWE |
PEX_CSB_OBCTRL_CFGWE);
out_win = &pex->bridge.pex_outbound_win[0];
if (bus) {
out_le32(&out_win->ar, PEX_OWAR_EN | PEX_OWAR_TYPE_CFG |
CONFIG_SYS_PCIE2_CFG_SIZE);
out_le32(&out_win->bar, CONFIG_SYS_PCIE2_CFG_BASE);
} else {
out_le32(&out_win->ar, PEX_OWAR_EN | PEX_OWAR_TYPE_CFG |
CONFIG_SYS_PCIE1_CFG_SIZE);
out_le32(&out_win->bar, CONFIG_SYS_PCIE1_CFG_BASE);
}
out_le32(&out_win->tarl, 0);
out_le32(&out_win->tarh, 0);
for (i = 0; i < 2; i++, reg++) {
u32 ar;
if (reg->size == 0)
break;
out_win = &pex->bridge.pex_outbound_win[i + 1];
out_le32(&out_win->bar, reg->phys_start);
out_le32(&out_win->tarl, reg->bus_start);
out_le32(&out_win->tarh, 0);
ar = PEX_OWAR_EN | (reg->size & PEX_OWAR_SIZE);
if (reg->flags & PCI_REGION_IO)
ar |= PEX_OWAR_TYPE_IO;
else
ar |= PEX_OWAR_TYPE_MEM;
out_le32(&out_win->ar, ar);
}
out_le32(&pex->bridge.pex_csb_ibctrl, PEX_CSB_IBCTRL_PIOE);
ram_sz = gd->ram_size;
barl = 0;
tar = 0;
i = 0;
while (ram_sz > 0) {
in_win = &pex->bridge.pex_inbound_win[i];
out_le32(&in_win->barl, barl);
out_le32(&in_win->barh, 0x0);
out_le32(&in_win->tar, tar);
if (ram_sz >= 0x10000000) {
			/* The maximum window size is 256M */
out_le32(&in_win->ar, PEX_IWAR_EN | PEX_IWAR_NSOV |
PEX_IWAR_TYPE_PF | 0x0FFFF000);
barl += 0x10000000;
tar += 0x10000000;
ram_sz -= 0x10000000;
} else {
			/* The UM is not clear here,
			 * so round up to an even MB boundary. */
ram_sz = ram_sz >> (20 +
((ram_sz & 0xFFFFF) ? 1 : 0));
if (!(ram_sz % 2))
ram_sz -= 1;
out_le32(&in_win->ar, PEX_IWAR_EN | PEX_IWAR_NSOV |
PEX_IWAR_TYPE_PF | (ram_sz << 20) | 0xFF000);
ram_sz = 0;
}
i++;
}
in_win = &pex->bridge.pex_inbound_win[i];
out_le32(&in_win->barl, CONFIG_SYS_IMMR);
out_le32(&in_win->barh, 0);
out_le32(&in_win->tar, CONFIG_SYS_IMMR);
out_le32(&in_win->ar, PEX_IWAR_EN |
PEX_IWAR_TYPE_NO_PF | PEX_IWAR_SIZE_1M);
/* Enable the host virtual INTX interrupts */
out_le32(&pex->bridge.pex_int_axi_misc_enb,
in_le32(&pex->bridge.pex_int_axi_misc_enb) | 0x1E0);
/* Hose configure header is memory-mapped */
hose_cfg_base = (void *)pex;
get_clocks();
/* Configure the PCIE controller core clock ratio */
out_le32(hose_cfg_base + PEX_GCLK_RATIO,
(((bus ? gd->pciexp2_clk : gd->pciexp1_clk) / 1000000) * 16)
/ 333);
udelay(1000000);
/* Do Type 1 bridge configuration */
out_8(hose_cfg_base + PCI_PRIMARY_BUS, 0);
out_8(hose_cfg_base + PCI_SECONDARY_BUS, 1);
out_8(hose_cfg_base + PCI_SUBORDINATE_BUS, 255);
/*
* Write to Command register
*/
reg16 = in_le16(hose_cfg_base + PCI_COMMAND);
reg16 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO |
PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
out_le16(hose_cfg_base + PCI_COMMAND, reg16);
/*
* Clear non-reserved bits in status register.
*/
out_le16(hose_cfg_base + PCI_STATUS, 0xffff);
out_8(hose_cfg_base + PCI_LATENCY_TIMER, 0x80);
out_8(hose_cfg_base + PCI_CACHE_LINE_SIZE, 0x08);
printf("PCIE%d: ", bus);
reg16 = in_le16(hose_cfg_base + PCI_LTSSM);
if (reg16 >= PCI_LTSSM_L0)
printf("link\n");
else
printf("No link\n");
mpc83xx_pcie_register_hose(bus, reg, reg16 >= PCI_LTSSM_L0);
}
/*
* The caller must have already set SCCR, SERDES and the PCIE_LAW BARs
* must have been set to cover all of the requested regions.
*/
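/*
 * Illustrative call from board code (region setup and names are
 * board-specific assumptions, not defined in this file):
 *
 *	struct pci_region *regions[] = { pcie_regions_0, pcie_regions_1 };
 *
 *	mpc83xx_pcie_init(2, regions, 0);
 */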
void mpc83xx_pcie_init(int num_buses, struct pci_region **reg, int warmboot)
{
int i;
/*
* Release PCI RST Output signal.
* Power on to RST high must be at least 100 ms as per PCI spec.
* On warm boots only 1 ms is required.
*/
udelay(warmboot ? 1000 : 100000);
for (i = 0; i < num_buses; i++)
mpc83xx_pcie_init_bus(i, reg[i]);
}
| luckasfb/OT_903D-kernel-2.6.35.7 | bootable/bootloader/uboot/arch/powerpc/cpu/mpc83xx/pcie.c | C | gpl-2.0 | 11,152 |
/* $Id$ */
/** @file
* IPRT - User & Kernel Memory, Ring-0 Driver, Solaris.
*/
/*
* Copyright (C) 2009 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/mem.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/assert.h>
#include <iprt/err.h>
RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
{
int rc;
RT_ASSERT_INTS_ON();
rc = ddi_copyin((const char *)R3PtrSrc, pvDst, cb, 0 /*flags*/);
if (RT_LIKELY(rc == 0))
return VINF_SUCCESS;
return VERR_ACCESS_DENIED;
}
RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
{
int rc;
RT_ASSERT_INTS_ON();
rc = ddi_copyout(pvSrc, (void *)R3PtrDst, cb, 0 /*flags*/);
if (RT_LIKELY(rc == 0))
return VINF_SUCCESS;
return VERR_ACCESS_DENIED;
}
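/*
 * On Solaris the user address space lies below 'kernelbase'; anything at or
 * above it belongs to the kernel. The two predicates below rely on that.
 */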
RTR0DECL(bool) RTR0MemUserIsValidAddr(RTR3PTR R3Ptr)
{
return R3Ptr < kernelbase;
}
RTR0DECL(bool) RTR0MemKernelIsValidAddr(void *pv)
{
return (uintptr_t)pv >= kernelbase;
}
RTR0DECL(bool) RTR0MemAreKrnlAndUsrDifferent(void)
{
return true;
}
| dezelin/vbox-haiku | src/VBox/Runtime/r0drv/solaris/memuserkernel-r0drv-solaris.c | C | gpl-2.0 | 2,352 |
/**
* @file g_func.c
* @brief func_* edicts
*/
/*
All original material Copyright (C) 2002-2011 UFO: Alien Invasion.
Original file from Quake 2 v3.21: quake2-2.31/game/g_spawn.c
Copyright (C) 1997-2001 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "g_local.h"
/**
* @brief If an actor was standing on the breakable that is going to get destroyed, we have to let him fall to the ground
* @param self The breakable edict
* @param activator The touching edict
* @note This touch function is only executed if the func_breakable edict has a HP level of 0 (e.g. it is already destroyed)
* @return false because this is no client action
*/
static qboolean Touch_Breakable (edict_t *self, edict_t *activator)
{
/* not yet broken */
if (self->HP != 0)
return qfalse;
/** @todo check that the actor is standing upon the breakable */
if (G_IsActor(activator))
G_ActorFall(activator);
return qfalse;
}
static qboolean Destroy_Breakable (edict_t *self)
{
vec3_t origin;
const char *model = self->model;
VectorCenterFromMinsMaxs(self->absmin, self->absmax, origin);
/* the HP value is used to decide whether this was a triggered call or a
* call during a fight - a triggered call will be handled differently in
* terms of timing and the related particle effects in the client code */
if (self->HP == 0)
G_EventModelExplodeTriggered(self);
else
G_EventModelExplode(self);
if (self->particle)
G_SpawnParticle(origin, self->spawnflags, self->particle);
switch (self->material) {
case MAT_GLASS:
G_EventSpawnSound(PM_ALL, qfalse, self, origin, "misc/breakglass+");
break;
case MAT_METAL:
G_EventSpawnSound(PM_ALL, qfalse, self, origin, "misc/breakmetal+");
break;
case MAT_ELECTRICAL:
G_EventSpawnSound(PM_ALL, qfalse, self, origin, "misc/breakelectric+");
break;
case MAT_WOOD:
G_EventSpawnSound(PM_ALL, qfalse, self, origin, "misc/breakwood+");
break;
case MAT_MAX:
break;
}
G_TouchEdicts(self, 10.0f);
/* destroy the door trigger */
if (self->child)
G_FreeEdict(self->child);
/* now we can destroy the edict completely */
G_FreeEdict(self);
G_RecalcRouting(model);
return qtrue;
}
/**
* @brief func_breakable (0.3 0.3 0.3) ?
* Used for breakable objects.
* @note These edicts are added client side as local models,
* they are stored in the lmList (because they are inline models)
* for tracing (see inlineList in cmodel.c)
* @sa CM_EntTestLine
* @sa LM_AddModel
* @sa SV_SetModel
* @sa G_SendEdictsAndBrushModels
*/
void SP_func_breakable (edict_t *ent)
{
ent->classname = "breakable";
ent->type = ET_BREAKABLE;
ent->flags |= FL_DESTROYABLE;
/* set an inline model */
gi.SetModel(ent, ent->model);
ent->solid = SOLID_BSP;
gi.LinkEdict(ent);
Com_DPrintf(DEBUG_GAME, "func_breakable: model (%s) num: %i mins: %i %i %i maxs: %i %i %i origin: %i %i %i\n",
ent->model, ent->mapNum, (int)ent->mins[0], (int)ent->mins[1], (int)ent->mins[2],
(int)ent->maxs[0], (int)ent->maxs[1], (int)ent->maxs[2],
(int)ent->origin[0], (int)ent->origin[1], (int)ent->origin[2]);
ent->destroy = Destroy_Breakable;
ent->touch = Touch_Breakable;
}
/*
=============================================================================
DOOR FUNCTIONS
=============================================================================
*/
/**
* @brief Slides a door
* @note The new door state must already be set
* @param[in,out] door The entity of the inline model. The aabb of this bmodel will get updated
* in this function to reflect the new door position in the world
* @sa LET_SlideDoor
*/
static void Door_SlidingUse (edict_t *door)
{
const qboolean open = door->doorState == STATE_OPENED;
vec3_t moveAngles, moveDir, distanceVec;
int distance;
/* get the movement angle vector - a negative speed value will close the door*/
GET_SLIDING_DOOR_SHIFT_VECTOR(door->dir, open ? 1 : -1, moveAngles);
/* get the direction vector from the movement angles that were set on the entity */
AngleVectors(moveAngles, moveDir, NULL, NULL);
moveDir[0] = fabsf(moveDir[0]);
moveDir[1] = fabsf(moveDir[1]);
moveDir[2] = fabsf(moveDir[2]);
/* calculate the distance from the movement angles and the entity size. This is the
* distance the door has to slide to fully open or close */
distance = DotProduct(moveDir, door->size);
/* the door is moved in one step on the server side - lerping is not needed here - so we
* perform the scalar multiplication with the distance the door must move in order to
* fully close/open */
VectorMul(distance, moveAngles, distanceVec);
/* set the updated position. The bounding boxes that are used for tracing must be
* shifted when the door state changes. As the mins and maxs of the aabb are absolute
* world coordinates in the map we have to translate the position by the above
* calculated movement vector */
#if 0
/** @todo this is not yet working for tracing and pathfinding - check what must be done to
* allow shooting and walking through the opened door */
VectorAdd(door->origin, distanceVec, door->origin);
gi.SetInlineModelOrientation(door->model, door->origin, door->angles);
#else
VectorAdd(door->mins, distanceVec, door->mins);
VectorAdd(door->maxs, distanceVec, door->maxs);
#endif
}
/**
* @brief Opens/closes a door
* @note Use function for func_door
* @todo Check if the door can be opened or closed - there should not be
* anything in the way (e.g. an actor)
*/
static qboolean Door_Use (edict_t *door, edict_t *activator)
{
if (door->doorState == STATE_CLOSED) {
door->doorState = STATE_OPENED;
/* change rotation/origin and relink */
if (door->type == ET_DOOR) {
if (door->dir & DOOR_OPEN_REVERSE)
door->angles[door->dir & 3] -= DOOR_ROTATION_ANGLE;
else
door->angles[door->dir & 3] += DOOR_ROTATION_ANGLE;
} else if (door->type == ET_DOOR_SLIDING) {
Door_SlidingUse(door);
}
gi.LinkEdict(door);
/* maybe the server called this because the door starts opened */
if (G_MatchIsRunning()) {
/* let everybody know, that the door opens */
G_EventDoorOpen(door);
if (door->noise[0] != '\0')
G_EventSpawnSound(PM_ALL, qfalse, door, door->origin, door->noise);
}
} else if (door->doorState == STATE_OPENED) {
door->doorState = STATE_CLOSED;
/* change rotation and relink */
if (door->type == ET_DOOR) {
if (door->dir & DOOR_OPEN_REVERSE)
door->angles[door->dir & 3] += DOOR_ROTATION_ANGLE;
else
door->angles[door->dir & 3] -= DOOR_ROTATION_ANGLE;
} else if (door->type == ET_DOOR_SLIDING) {
Door_SlidingUse(door);
}
gi.LinkEdict(door);
/* closed is the standard, opened is handled above - we need an active
* team here already */
if (G_MatchIsRunning()) {
/* let everybody know, that the door closes */
G_EventDoorClose(door);
if (door->noise[0] != '\0')
G_EventSpawnSound(PM_ALL, qfalse, door, door->origin, door->noise);
}
} else
return qfalse;
/* Update model orientation */
gi.SetInlineModelOrientation(door->model, door->origin, door->angles);
Com_DPrintf(DEBUG_GAME, "Server processed door movement.\n");
/* Update path finding table */
G_RecalcRouting(door->model);
if (activator && G_IsLivingActor(activator)) {
/* Check if the player appears/perishes, seen from other teams. */
G_CheckVis(activator, qtrue);
/* Calc new vis for the activator. */
G_CheckVisTeamAll(activator->team, qfalse, activator);
}
return qtrue;
}
/**
* @brief Trigger to open the door we are standing in front of it
* @sa CL_DoorOpen
* @sa LE_CloseOpen
* @sa CL_ActorDoorAction
* @sa AI_CheckUsingDoor
*/
static qboolean Touch_DoorTrigger (edict_t *self, edict_t *activator)
{
if (self->owner && self->owner->inuse) {
if (G_IsAI(activator)) {
/* let the ai interact with the door */
if (self->flags & FL_GROUPSLAVE)
self = self->groupMaster;
if (AI_CheckUsingDoor(activator, self->owner))
G_ActorUseDoor(activator, self->owner);
/* we don't want the client action stuff to be send for ai actors */
return qfalse;
} else {
/* prepare for client action */
G_ActorSetClientAction(activator, self->owner);
return qtrue;
}
}
return qfalse;
}
/**
* @brief Left the door trigger zone - reset the client action
* @param self The trigger
* @param activator The edict that left the trigger zone
*/
static void Reset_DoorTrigger (edict_t *self, edict_t *activator)
{
if (activator->clientAction == self->owner)
G_ActorSetClientAction(activator, NULL);
}
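/* Spawnflag bit: make the door open in the reverse direction (see the
 * DOOR_OPEN_REVERSE handling in Door_Use). */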
#define REVERSE 0x00000200
/**
* @brief func_door (0 .5 .8) ?
* "health" if set, door is destroyable
* @sa SV_SetModel
* @sa LM_AddModel
* @sa G_SendEdictsAndBrushModels
*/
void SP_func_door (edict_t *ent)
{
edict_t *other;
ent->classname = "door";
ent->type = ET_DOOR;
if (!ent->noise)
ent->noise = "doors/open_close";
/* set an inline model */
gi.SetModel(ent, ent->model);
ent->solid = SOLID_BSP;
gi.LinkEdict(ent);
ent->doorState = STATE_CLOSED;
ent->dir = YAW;
if (ent->spawnflags & REVERSE)
ent->dir |= DOOR_OPEN_REVERSE;
if (ent->HP)
ent->flags |= FL_DESTROYABLE;
ent->flags |= FL_CLIENTACTION;
/* spawn the trigger entity */
other = G_TriggerSpawn(ent);
other->touch = Touch_DoorTrigger;
other->reset = Reset_DoorTrigger;
ent->child = other;
G_ActorSetTU(ent, TU_DOOR_ACTION);
ent->use = Door_Use;
/* the door should start opened */
if (ent->spawnflags & FL_TRIGGERED)
G_UseEdict(ent, NULL);
ent->destroy = Destroy_Breakable;
}
void SP_func_door_sliding (edict_t *ent)
{
ent->classname = "doorsliding";
ent->type = ET_DOOR_SLIDING;
if (!ent->noise)
ent->noise = "doors/slide";
/* set an inline model */
gi.SetModel(ent, ent->model);
ent->solid = SOLID_BSP;
gi.LinkEdict(ent);
if (ent->spawnflags & REVERSE)
ent->dir |= DOOR_OPEN_REVERSE;
if (ent->HP)
ent->flags |= FL_DESTROYABLE;
ent->doorState = STATE_CLOSED;
ent->speed = 10;
ent->use = Door_Use;
ent->destroy = Destroy_Breakable;
}
/**
* @brief Spawns a rotating solid inline brush model
* @sa SV_SetModel
* @sa LM_AddModel
*/
void SP_func_rotating (edict_t *ent)
{
ent->classname = "rotating";
ent->type = ET_ROTATING;
/* set an inline model */
gi.SetModel(ent, ent->model);
ent->solid = SOLID_BSP;
gi.LinkEdict(ent);
/* the lower, the faster */
if (!ent->speed)
ent->speed = 50;
if (ent->HP)
ent->flags |= FL_DESTROYABLE;
ent->destroy = Destroy_Breakable;
}
| ptitSeb/UFO--AI-OpenPandora | src/game/g_func.c | C | gpl-2.0 | 11,042 |
/*
* Copyright (C) 2003 Robert Kooima
*
* NEVERBALL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <math.h>
#include "vec3.h"
#include "common.h"
#include "solid_vary.h"
#include "solid_sim.h"
#include "solid_all.h"
#include "solid_cmd.h"
#define LARGE 1.0e+5f
#define SMALL 1.0e-3f
/*---------------------------------------------------------------------------*/
/* Solves (p + v * t) . (p + v * t) == r * r for smallest t. */
/*
* Given vectors A = P, B = V * t, C = A + B, |C| = r, solve for
* smallest t.
*
* Some useful dot product properties:
*
* 1) A . A = |A| * |A|
* 2) A . (B + C) = A . B + A . C
* 3) (r * A) . B = r * (A . B)
*
* Deriving a quadratic equation:
*
* C . C = r * r (1)
* (A + B) . (A + B) = r * r
* A . (A + B) + B . (A + B) = r * r (2)
* A . A + A . B + B . A + B . B = r * r (2)
* A . A + 2 * (A . B) + B . B = r * r
* P . P + 2 * (P . V * t) + (V * t . V * t) = r * r
* P . P + 2 * (P . V) * t + (V . V) * t * t = r * r (3)
* (V . V) * t * t + 2 * (P . V) * t + P . P - r * r = 0
*
* This equation is solved using the quadratic formula.
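 * With a = V . V, b = 2 * (P . V) and c = P . P - r * r the roots are
 * t = (-b +/- sqrt(b * b - 4 * a * c)) / (2 * a); the smaller root is the
 * earliest time of contact.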
*/
static float v_sol(const float p[3], const float v[3], float r)
{
float a = v_dot(v, v);
float b = v_dot(v, p) * 2.0f;
float c = v_dot(p, p) - r * r;
float d = b * b - 4.0f * a * c;
    /* HACK: This seems to cause failures to detect low-velocity collisions.
Yet, the potential division by zero below seems fine.
if (fabsf(a) < SMALL) return LARGE;
*/
if (d < 0.0f) return LARGE;
else if (d > 0.0f)
{
float t0 = 0.5f * (-b - fsqrtf(d)) / a;
float t1 = 0.5f * (-b + fsqrtf(d)) / a;
float t = (t0 < t1) ? t0 : t1;
return (t < 0.0f) ? LARGE : t;
}
else return -b * 0.5f / a;
}
/*---------------------------------------------------------------------------*/
/*
* Compute the earliest time and position of the intersection of a
* sphere and a vertex.
*
* The sphere has radius R and moves along vector V from point P. The
* vertex moves along vector W from point Q in a coordinate system
* based at O.
*/
static float v_vert(float Q[3],
const float o[3],
const float q[3],
const float w[3],
const float p[3],
const float v[3], float r)
{
float O[3], P[3], V[3];
float t = LARGE;
v_add(O, o, q);
v_sub(P, p, O);
v_sub(V, v, w);
if (v_dot(P, V) < 0.0f)
{
t = v_sol(P, V, r);
if (t < LARGE)
v_mad(Q, O, w, t);
}
return t;
}
/*
* Compute the earliest time and position of the intersection of a
* sphere and an edge.
*
* The sphere has radius R and moves along vector V from point P. The
* edge moves along vector W from point Q in a coordinate system based
* at O. The edge extends along the length of vector U.
*/
static float v_edge(float Q[3],
const float o[3],
const float q[3],
const float u[3],
const float w[3],
const float p[3],
const float v[3], float r)
{
float d[3], e[3];
float P[3], V[3];
float du, eu, uu, s, t;
v_sub(d, p, o);
v_sub(d, d, q);
v_sub(e, v, w);
/*
* Think projections. Vectors D, extending from the edge vertex Q
     * to the sphere, and E, the relative velocity of the sphere wrt the
* edge, are made orthogonal to the edge vector U. Division of
* the dot products is required to obtain the true projection
* ratios since U does not have unit length.
*/
du = v_dot(d, u);
eu = v_dot(e, u);
uu = v_dot(u, u);
v_mad(P, d, u, -du / uu);
/* First, test for intersection. */
if (v_dot(P, P) < r * r)
{
/* The sphere already intersects the line of the edge. */
if (du < 0 || du > uu)
{
/*
* The sphere is behind the endpoints of the edge, and
* can't hit the edge without hitting the vertices first.
*/
return LARGE;
}
/* The sphere already intersects the edge. */
if (v_dot(P, e) >= 0)
{
/* Moving apart. */
return LARGE;
}
v_nrm(P, P);
v_mad(Q, p, P, -r);
return 0;
}
v_mad(V, e, u, -eu / uu);
t = v_sol(P, V, r);
s = (du + eu * t) / uu; /* Projection of D + E * t on U. */
if (0.0f <= t && t < LARGE && 0.0f < s && s < 1.0f)
{
v_mad(d, o, w, t);
v_mad(e, q, u, s);
v_add(Q, e, d);
}
else
t = LARGE;
return t;
}
/*
* Compute the earliest time and position of the intersection of a
* sphere and a plane.
*
* The sphere has radius R and moves along vector V from point P. The
* plane moves along vector W. The plane has normal N and is
* positioned at distance D from the origin O along that normal.
*/
static float v_side(float Q[3],
const float o[3],
const float w[3],
const float n[3], float d,
const float p[3],
const float v[3], float r)
{
float vn = v_dot(v, n);
float wn = v_dot(w, n);
float t = LARGE;
if (vn - wn <= 0.0f)
{
float on = v_dot(o, n);
float pn = v_dot(p, n);
float u = (r + d + on - pn) / (vn - wn);
float a = ( d + on - pn) / (vn - wn);
if (0.0f <= u)
{
t = u;
v_mad(Q, p, v, +t);
v_mad(Q, Q, n, -r);
}
else if (0.0f <= a)
{
t = 0;
v_mad(Q, p, v, +t);
v_mad(Q, Q, n, -r);
}
}
return t;
}
/*---------------------------------------------------------------------------*/
/*
* Compute the new linear and angular velocities of a bouncing ball.
* Q gives the position of the point of impact and W gives the
* velocity of the object being impacted.
*/
static float sol_bounce(struct v_ball *up,
const float q[3],
const float w[3], float dt)
{
float n[3], r[3], d[3], vn, wn;
float *p = up->p;
float *v = up->v;
/* Find the normal of the impact. */
v_sub(r, p, q);
v_sub(d, v, w);
v_nrm(n, r);
/* Find the new angular velocity. */
v_crs(up->w, d, r);
v_scl(up->w, up->w, -1.0f / (up->r * up->r));
/* Find the new linear velocity. */
vn = v_dot(v, n);
wn = v_dot(w, n);
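    /* The 1.7 factor reflects the relative normal velocity with a
     * restitution of roughly 0.7. */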
v_mad(v, v, n, 1.7 * (wn - vn));
v_mad(p, q, n, up->r);
/* Return the "energy" of the impact, to determine the sound amplitude. */
return fabsf(v_dot(n, d));
}
/*---------------------------------------------------------------------------*/
static float sol_test_vert(float dt,
float T[3],
const struct v_ball *up,
const struct b_vert *vp,
const float o[3],
const float w[3])
{
return v_vert(T, o, vp->p, w, up->p, up->v, up->r);
}
static float sol_test_edge(float dt,
float T[3],
const struct v_ball *up,
const struct s_base *base,
const struct b_edge *ep,
const float o[3],
const float w[3])
{
float q[3];
float u[3];
v_cpy(q, base->vv[ep->vi].p);
v_sub(u, base->vv[ep->vj].p, base->vv[ep->vi].p);
return v_edge(T, o, q, u, w, up->p, up->v, up->r);
}
static float sol_test_side(float dt,
float T[3],
const struct v_ball *up,
const struct s_base *base,
const struct b_lump *lp,
const struct b_side *sp,
const float o[3],
const float w[3])
{
float t = v_side(T, o, w, sp->n, sp->d, up->p, up->v, up->r);
int i;
if (t < dt)
for (i = 0; i < lp->sc; i++)
{
const struct b_side *sq = base->sv + base->iv[lp->s0 + i];
if (sp != sq &&
v_dot(T, sq->n) -
v_dot(o, sq->n) -
v_dot(w, sq->n) * t > sq->d)
return LARGE;
}
return t;
}
/*---------------------------------------------------------------------------*/
static int sol_test_fore(float dt,
const struct v_ball *up,
const struct b_side *sp,
const float o[3],
const float w[3])
{
float q[3], d;
/* If the ball is not behind the plane, the test passes. */
v_sub(q, up->p, o);
d = sp->d;
if (v_dot(q, sp->n) - d + up->r >= 0)
return 1;
/* If it's not behind the plane after DT seconds, the test passes. */
v_mad(q, q, up->v, dt);
d += v_dot(w, sp->n) * dt;
if (v_dot(q, sp->n) - d + up->r >= 0)
return 1;
/* Else, test fails. */
return 0;
}
static int sol_test_back(float dt,
const struct v_ball *up,
const struct b_side *sp,
const float o[3],
const float w[3])
{
float q[3], d;
/* If the ball is not in front of the plane, the test passes. */
v_sub(q, up->p, o);
d = sp->d;
if (v_dot(q, sp->n) - d - up->r <= 0)
return 1;
/* If it's not in front of the plane after DT seconds, the test passes. */
v_mad(q, q, up->v, dt);
d += v_dot(w, sp->n) * dt;
if (v_dot(q, sp->n) - d - up->r <= 0)
return 1;
/* Else, test fails. */
return 0;
}
/*---------------------------------------------------------------------------*/
static float sol_test_lump(float dt,
float T[3],
const struct v_ball *up,
const struct s_base *base,
const struct b_lump *lp,
const float o[3],
const float w[3])
{
float U[3] = { 0.0f, 0.0f, 0.0f };
float u, t = dt;
int i;
/* Short circuit a non-solid lump. */
if (lp->fl & L_DETAIL) return t;
/* Test all verts */
if (up->r > 0.0f)
for (i = 0; i < lp->vc; i++)
{
const struct b_vert *vp = base->vv + base->iv[lp->v0 + i];
if ((u = sol_test_vert(t, U, up, vp, o, w)) < t)
{
v_cpy(T, U);
t = u;
}
}
/* Test all edges */
if (up->r > 0.0f)
for (i = 0; i < lp->ec; i++)
{
const struct b_edge *ep = base->ev + base->iv[lp->e0 + i];
if ((u = sol_test_edge(t, U, up, base, ep, o, w)) < t)
{
v_cpy(T, U);
t = u;
}
}
/* Test all sides */
for (i = 0; i < lp->sc; i++)
{
const struct b_side *sp = base->sv + base->iv[lp->s0 + i];
if ((u = sol_test_side(t, U, up, base, lp, sp, o, w)) < t)
{
v_cpy(T, U);
t = u;
}
}
return t;
}
static float sol_test_node(float dt,
float T[3],
const struct v_ball *up,
const struct s_base *base,
const struct b_node *np,
const float o[3],
const float w[3])
{
float U[3], u, t = dt;
int i;
/* Test all lumps */
for (i = 0; i < np->lc; i++)
{
const struct b_lump *lp = base->lv + np->l0 + i;
if ((u = sol_test_lump(t, U, up, base, lp, o, w)) < t)
{
v_cpy(T, U);
t = u;
}
}
/* Test in front of this node */
if (np->ni >= 0 && sol_test_fore(t, up, base->sv + np->si, o, w))
{
const struct b_node *nq = base->nv + np->ni;
if ((u = sol_test_node(t, U, up, base, nq, o, w)) < t)
{
v_cpy(T, U);
t = u;
}
}
/* Test behind this node */
if (np->nj >= 0 && sol_test_back(t, up, base->sv + np->si, o, w))
{
const struct b_node *nq = base->nv + np->nj;
if ((u = sol_test_node(t, U, up, base, nq, o, w)) < t)
{
v_cpy(T, U);
t = u;
}
}
return t;
}
static float sol_test_body(float dt,
float T[3], float V[3],
const struct v_ball *up,
const struct s_vary *vary,
const struct v_body *bp)
{
float U[3], O[3], E[4], W[3], u;
const struct b_node *np = vary->base->nv + bp->base->ni;
sol_body_p(O, vary, bp, 0.0f);
sol_body_v(W, vary, bp, dt);
sol_body_e(E, vary, bp, 0.0f);
/*
* For rotating bodies, rather than rotate every normal and vertex
* of the body, we temporarily pretend the ball is rotating and
* moving about a static body.
*/
/*
* Linear velocity of a point rotating about the origin:
* v = w x p
*/
if (E[0] != 1.0f || sol_body_w(vary, bp))
{
/* The body has a non-identity orientation or it is rotating. */
struct v_ball ball;
float e[4], p0[3], p1[3];
const float z[3] = { 0 };
/* First, calculate position at start and end of time interval. */
v_sub(p0, up->p, O);
v_cpy(p1, p0);
q_conj(e, E);
q_rot(p0, e, p0);
v_mad(p1, p1, up->v, dt);
v_mad(p1, p1, W, -dt);
sol_body_e(e, vary, bp, dt);
q_conj(e, e);
q_rot(p1, e, p1);
/* Set up ball struct with values relative to body. */
ball = *up;
v_cpy(ball.p, p0);
/* Calculate velocity from start/end positions and time. */
v_sub(ball.v, p1, p0);
v_scl(ball.v, ball.v, 1.0f / dt);
if ((u = sol_test_node(dt, U, &ball, vary->base, np, z, z)) < dt)
{
/* Compute the final orientation. */
sol_body_e(e, vary, bp, u);
/* Return world space coordinates. */
q_rot(T, e, U);
v_add(T, O, T);
/* Move forward. */
v_mad(T, T, W, u);
/* Express "non-ball" velocity. */
q_rot(V, e, ball.v);
v_sub(V, up->v, V);
dt = u;
}
}
else
{
if ((u = sol_test_node(dt, U, up, vary->base, np, O, W)) < dt)
{
v_cpy(T, U);
v_cpy(V, W);
dt = u;
}
}
return dt;
}
static float sol_test_file(float dt,
float T[3], float V[3],
const struct v_ball *up,
const struct s_vary *vary)
{
float U[3], W[3], u, t = dt;
int i;
for (i = 0; i < vary->bc; i++)
{
const struct v_body *bp = vary->bv + i;
if ((u = sol_test_body(t, U, W, up, vary, bp)) < t)
{
v_cpy(T, U);
v_cpy(V, W);
t = u;
}
}
return t;
}
/*---------------------------------------------------------------------------*/
/*
* Track simulation steps in integer milliseconds.
*/
static float ms_accum;
static void ms_init(void)
{
ms_accum = 0.0f;
}
static int ms_step(float dt)
{
int ms = 0;
ms_accum += dt;
while (ms_accum >= 0.001f)
{
ms_accum -= 0.001f;
ms += 1;
}
return ms;
}
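/* Like ms_step, but only report how many whole milliseconds DT would add,
 * without consuming the accumulator. */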
static int ms_peek(float dt)
{
int ms = 0;
float at;
at = ms_accum + dt;
while (at >= 0.001f)
{
at -= 0.001f;
ms += 1;
}
return ms;
}
/*---------------------------------------------------------------------------*/
/*
* Step the physics forward DT seconds under the influence of gravity
* vector G. If the ball gets pinched between two moving solids, this
* loop might not terminate. It is better to do something physically
* impossible than to lock up the game. So, if we make more than C
* iterations, punt it.
*/
float sol_step(struct s_vary *vary, const float *g, float dt, int ui, int *m)
{
float P[3], V[3], v[3], r[3], a[3], d, e, nt, b = 0.0f, tt = dt;
int c;
union cmd cmd;
if (ui < vary->uc)
{
struct v_ball *up = vary->uv + ui;
/* If the ball is in contact with a surface, apply friction. */
v_cpy(a, up->v);
v_cpy(v, up->v);
v_cpy(up->v, g);
if (m && sol_test_file(tt, P, V, up, vary) < 0.0005f)
{
v_cpy(up->v, v);
v_sub(r, P, up->p);
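            /* d is the cosine of the angle between the contact offset and
             * gravity; only a near-floor contact counts as rolling here. */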
if ((d = v_dot(r, g) / (v_len(r) * v_len(g))) > 0.999f)
{
if ((e = (v_len(up->v) - dt)) > 0.0f)
{
/* Scale the linear velocity. */
v_nrm(up->v, up->v);
v_scl(up->v, up->v, e);
/* Scale the angular velocity. */
v_sub(v, V, up->v);
v_crs(up->w, v, r);
v_scl(up->w, up->w, -1.0f / (up->r * up->r));
}
else
{
/* Friction has brought the ball to a stop. */
up->v[0] = 0.0f;
up->v[1] = 0.0f;
up->v[2] = 0.0f;
(*m)++;
}
}
else v_mad(up->v, v, g, tt);
}
else v_mad(up->v, v, g, tt);
/* Test for collision. */
for (c = 16; c > 0 && tt > 0; c--)
{
float st;
int mi, ms;
/* HACK: avoid stepping across path changes. */
st = tt;
for (mi = 0; mi < vary->mc; mi++)
{
struct v_move *mp = vary->mv + mi;
struct v_path *pp = vary->pv + mp->pi;
if (!pp->f)
continue;
if (mp->tm + ms_peek(st) > pp->base->tm)
st = MS_TO_TIME(pp->base->tm - mp->tm);
}
/* Miss collisions if we reach the iteration limit. */
if (c > 1)
nt = sol_test_file(st, P, V, up, vary);
else
nt = tt;
cmd.type = CMD_STEP_SIMULATION;
cmd.stepsim.dt = nt;
sol_cmd_enq(&cmd);
ms = ms_step(nt);
sol_move_step(vary, nt, ms);
sol_swch_step(vary, nt, ms);
sol_ball_step(vary, nt);
if (nt < st)
if (b < (d = sol_bounce(up, P, V, nt)))
b = d;
tt -= nt;
}
v_sub(a, up->v, a);
sol_pendulum(up, a, g, dt);
}
return b;
}
/*---------------------------------------------------------------------------*/
void sol_init_sim(struct s_vary *vary)
{
ms_init();
}
void sol_quit_sim(void)
{
return;
}
/*---------------------------------------------------------------------------*/
| drodin/neverball-old | share/solid_sim_sol.c | C | gpl-2.0 | 19,801 |
/*
* (C) Copyright 2008-2013 STMicroelectronics.
*
* Sean McGoogan <Sean.McGoogan@st.com>
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <command.h>
#include <asm/soc.h>
#include <asm/socregs.h>
#include <asm/io.h>
#include <asm/pio.h>
void flashWriteEnable(void)
{
/* Enable Vpp for writing to flash */
/* Nothing to do! */
}
void flashWriteDisable(void)
{
/* Disable Vpp for writing to flash */
/* Nothing to do! */
}
#if defined(CONFIG_ST40_STXH415)
#define PIOALT(port, pin, alt, dir) \
do \
{ \
stxh415_pioalt_select((port), (pin), (alt)); \
stxh415_pioalt_pad((port), (pin), (dir)); \
} while(0)
#elif defined(CONFIG_ST40_STXH416)
#define PIOALT(port, pin, alt, dir) \
do \
{ \
stxh416_pioalt_select((port), (pin), (alt)); \
stxh416_pioalt_pad((port), (pin), (dir)); \
} while(0)
#endif /* CONFIG_ST40_STXH415/CONFIG_ST40_STXH416 */
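/*
 * PIOALT() selects the alternate function for a PIO pin and then programs
 * the pad direction, using the SoC-specific helpers above.
 */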
/*
* MII0: PIO106[2] = GMII0_notRESET
* MII0: PIO13[4] = PIO6_CLKSEL (DVO_CK)
*
* MII1: PIO4[7] = GMII1_notRESET (needs J39 fitted)
* MII1: PIO2[5] = PIO6_CLKSEL (PIO_HDMI_TX_HPD)
*/
#if CONFIG_SYS_STM_STMAC_BASE == CONFIG_SYS_STM_STMAC0_BASE /* MII0, on CN22 */
# define GMII_PHY_NOT_RESET 106, 2
# define GMII_PHY_CLKOUT_NOT_TXCLK_SEL 13, 4
#elif CONFIG_SYS_STM_STMAC_BASE == CONFIG_SYS_STM_STMAC1_BASE /* MII1, on CN23 */
# define GMII_PHY_NOT_RESET 4, 7
# define GMII_PHY_CLKOUT_NOT_TXCLK_SEL 2, 5
#endif
static void configPIO(void)
{
/* Setup PIOs for ASC device */
#if defined(CONFIG_ST40_STXH415)
#if CONFIG_SYS_STM_ASC_BASE == STXH415_ASC2_BASE
/* Route UART2 via PIO17 for TX, RX, CTS & RTS (Alternative #1) */
PIOALT(17, 4, 2, stm_pad_direction_output); /* UART2-TX */
PIOALT(17, 5, 2, stm_pad_direction_input); /* UART2-RX */
PIOALT(17, 7, 2, stm_pad_direction_output); /* UART2-RTS */
PIOALT(17, 6, 2, stm_pad_direction_input); /* UART2-CTS */
#elif CONFIG_SYS_STM_ASC_BASE == STXH415_SBC_ASC0_BASE
/* Route SBC_UART0 via PIO3 for TX, RX, CTS & RTS (Alternative #1) */
PIOALT(3, 4, 1, stm_pad_direction_output); /* SBC_UART0-TX */
PIOALT(3, 5, 1, stm_pad_direction_input); /* SBC_UART0-RX */
PIOALT(3, 7, 1, stm_pad_direction_output); /* SBC_UART0-RTS */
PIOALT(3, 6, 1, stm_pad_direction_input); /* SBC_UART0-CTS */
#elif CONFIG_SYS_STM_ASC_BASE == STXH415_SBC_ASC1_BASE
/* Route SBC_UART1 via PIO2,3 for TX, RX, CTS & RTS (Alternative #3) */
PIOALT(2, 6, 3, stm_pad_direction_output); /* SBC_UART1-TX */
PIOALT(2, 7, 3, stm_pad_direction_input); /* SBC_UART1-RX */
PIOALT(3, 1, 3, stm_pad_direction_output); /* SBC_UART1-RTS */
PIOALT(3, 0, 3, stm_pad_direction_input); /* SBC_UART1-CTS */
#else
#error Unknown ASC port selected!
#endif /* CONFIG_SYS_STM_ASC_BASE == STXH415_ASCx_REGS_BASE */
#elif defined(CONFIG_ST40_STXH416)
#if CONFIG_SYS_STM_ASC_BASE == STXH416_ASC2_BASE
/* Route UART2 via PIO17 for TX, RX, CTS & RTS (Alternative #1) */
PIOALT(17, 4, 2, stm_pad_direction_output); /* UART2-TX */
PIOALT(17, 5, 2, stm_pad_direction_input); /* UART2-RX */
PIOALT(17, 7, 2, stm_pad_direction_output); /* UART2-RTS */
PIOALT(17, 6, 2, stm_pad_direction_input); /* UART2-CTS */
#elif CONFIG_SYS_STM_ASC_BASE == STXH416_SBC_ASC0_BASE
/* Route SBC_UART0 via PIO3 for TX, RX, CTS & RTS (Alternative #1) */
PIOALT(3, 4, 1, stm_pad_direction_output); /* SBC_UART0-TX */
PIOALT(3, 5, 1, stm_pad_direction_input); /* SBC_UART0-RX */
PIOALT(3, 7, 1, stm_pad_direction_output); /* SBC_UART0-RTS */
PIOALT(3, 6, 1, stm_pad_direction_input); /* SBC_UART0-CTS */
#elif CONFIG_SYS_STM_ASC_BASE == STXH416_SBC_ASC1_BASE
/* Route SBC_UART1 via PIO2,3 for TX, RX, CTS & RTS (Alternative #3) */
PIOALT(2, 6, 3, stm_pad_direction_output); /* SBC_UART1-TX */
PIOALT(2, 7, 3, stm_pad_direction_input); /* SBC_UART1-RX */
PIOALT(3, 1, 3, stm_pad_direction_output); /* SBC_UART1-RTS */
PIOALT(3, 0, 3, stm_pad_direction_input); /* SBC_UART1-CTS */
#else
#error Unknown ASC port selected!
#endif /* CONFIG_SYS_STM_ASC_BASE == STXH416_ASCx_REGS_BASE */
#endif /* CONFIG_ST40_STXH415/CONFIG_ST40_STXH416 */
#ifdef CONFIG_DRIVER_NET_STM_GMAC
/*
* Configure the Ethernet PHY Reset signal
*/
SET_PIO_PIN2(GMII_PHY_NOT_RESET, STPIO_OUT);
/*
* Configure the Ethernet PHY Mux PIO clock signal,
* as an output, which controls the speed of the MAC.
*/
SET_PIO_PIN2(GMII_PHY_CLKOUT_NOT_TXCLK_SEL, STPIO_OUT);
STPIO_SET_PIN2(GMII_PHY_CLKOUT_NOT_TXCLK_SEL, 1);
#endif /* CONFIG_DRIVER_NET_STM_GMAC */
}
#ifdef CONFIG_DRIVER_NET_STM_GMAC
extern void stmac_phy_reset(void)
{
/*
* Reset the Ethernet PHY.
*/
STPIO_SET_PIN2(GMII_PHY_NOT_RESET, 0);
udelay(10000); /* 10 ms */
STPIO_SET_PIN2(GMII_PHY_NOT_RESET, 1);
udelay(10000); /* 10 ms */
}
#endif /* CONFIG_DRIVER_NET_STM_GMAC */
#ifdef CONFIG_DRIVER_NET_STM_GMAC
extern void stmac_set_mac_speed(int speed)
{
/* Manage the MAC speed */
STPIO_SET_PIN2(GMII_PHY_CLKOUT_NOT_TXCLK_SEL,
(speed==1000)?1:0);
}
#endif /* CONFIG_DRIVER_NET_STM_GMAC */
extern int board_init(void)
{
configPIO();
#if 0 /* QQQ - TO IMPLEMENT */
#if defined(CONFIG_ST40_STM_SATA)
stx7105_configure_sata ();
#endif /* CONFIG_ST40_STM_SATA */
#endif /* QQQ - TO IMPLEMENT */
/*
* B2032A (MII) Ethernet card (*not* GMII)
	 * On the B2000B board, to get GMAC0 working, make sure that the
	 * jumpers on pins 9-10 of CN35 and CN36 are removed.
*
*******************************************************************
*
* B2035A (RMII + MMC(on CN22)) Ethernet + MMC card
* B2035A board has IP101ALF PHY connected in RMII mode
* and an MMC card
* It is designed to be connected to GMAC0 (CN22) to get MMC working,
* however we can connect it to GMAC1 for RMII testing.
*
*******************************************************************
*
* Note: The following (default) configuration assumes we are using
* the B2032 daughter board, in MII mode (not GMII). To use other
* configurations, then please have a look in the STLinux kernel
* distribution source trees for: arch/sh/boards/mach-b2000/setup.c
*/
#ifdef CONFIG_DRIVER_NET_STM_GMAC
#if CONFIG_SYS_STM_STMAC_BASE == CONFIG_SYS_STM_STMAC0_BASE /* MII0, on CN22 */
# if defined(CONFIG_STMAC_IP1001) /* IC+ IP1001 (B2032) */
#if defined(CONFIG_ST40_STXH415)
stxh415_configure_ethernet(0, &(struct stxh415_ethernet_config) {
.mode = stxh415_ethernet_mode_mii,
.ext_clk = 1,
.phy_bus = 0, });
#elif defined(CONFIG_ST40_STXH416)
stxh416_configure_ethernet(0, &(struct stxh416_ethernet_config) {
.mode = stxh416_ethernet_mode_mii,
.ext_clk = 1,
.phy_bus = 0, });
#endif /* CONFIG_ST40_STXH415/CONFIG_ST40_STXH416 */
# elif defined(CONFIG_STMAC_IP101A) /* IC+ IP101A (B2035) */
#if defined(CONFIG_ST40_STXH415)
stxh415_configure_ethernet(0, &(struct stxh415_ethernet_config) {
.mode = stxh415_ethernet_mode_rmii,
.ext_clk = 0,
.phy_bus = 0, });
#elif defined(CONFIG_ST40_STXH416)
stxh416_configure_ethernet(0, &(struct stxh416_ethernet_config) {
.mode = stxh416_ethernet_mode_rmii,
.ext_clk = 0,
.phy_bus = 0, });
#endif /* CONFIG_ST40_STXH415/CONFIG_ST40_STXH416 */
# else
# error Unknown PHY type associated with STM GMAC #0
# endif /* CONFIG_STMAC_IP1001 || CONFIG_STMAC_IP101A */
#elif CONFIG_SYS_STM_STMAC_BASE == CONFIG_SYS_STM_STMAC1_BASE /* MII1, on CN23 */
# if defined(CONFIG_STMAC_IP1001) /* IC+ IP1001 (B2032) */
#if defined(CONFIG_ST40_STXH415)
stxh415_configure_ethernet(1, &(struct stxh415_ethernet_config) {
.mode = stxh415_ethernet_mode_mii,
.ext_clk = 1,
.phy_bus = 1, });
#elif defined(CONFIG_ST40_STXH416)
stxh416_configure_ethernet(1, &(struct stxh416_ethernet_config) {
.mode = stxh416_ethernet_mode_mii,
.ext_clk = 1,
.phy_bus = 1, });
#endif /* CONFIG_ST40_STXH415/CONFIG_ST40_STXH416 */
# elif defined(CONFIG_STMAC_IP101A) /* IC+ IP101A (B2035) */
#if defined(CONFIG_ST40_STXH415)
stxh415_configure_ethernet(1, &(struct stxh415_ethernet_config) {
.mode = stxh415_ethernet_mode_rmii,
.ext_clk = 0,
.phy_bus = 1, });
#elif defined(CONFIG_ST40_STXH416)
stxh416_configure_ethernet(1, &(struct stxh416_ethernet_config) {
.mode = stxh416_ethernet_mode_rmii,
.ext_clk = 0,
.phy_bus = 1, });
#endif /* CONFIG_ST40_STXH415/CONFIG_ST40_STXH416 */
# else
# error Unknown PHY type associated with STM GMAC #1
# endif /* CONFIG_STMAC_IP1001 || CONFIG_STMAC_IP101A */
#else
#error Unknown base address for the STM GMAC
#endif
/* Hard Reset the PHY -- do after we have configured the MAC */
stmac_phy_reset();
#endif /* CONFIG_DRIVER_NET_STM_GMAC */
#if defined(CONFIG_CMD_I2C)
#if defined(CONFIG_ST40_STXH415)
stxh415_configure_i2c();
#elif defined(CONFIG_ST40_STXH416)
stxh416_configure_i2c();
#endif /* CONFIG_ST40_STXH415/CONFIG_ST40_STXH416 */
#endif /* CONFIG_CMD_I2C */
return 0;
}
int checkboard (void)
{
printf ("\n\nBoard: B2000"
#if defined(CONFIG_ST40_STXH415)
"-STxH415"
#elif defined(CONFIG_ST40_STXH416)
"-STxH416"
#endif
#ifdef CONFIG_ST40_SE_MODE
" [32-bit mode]"
#else
" [29-bit mode]"
#endif
"\n");
#if defined(CONFIG_SOFT_SPI)
/*
* Configure for the SPI Serial Flash.
* Note: for CONFIG_SYS_BOOT_FROM_SPI + CONFIG_ENV_IS_IN_EEPROM, this
* needs to be done after env_init(), hence it is done
* here, and not in board_init().
*/
#if defined(CONFIG_ST40_STXH415)
stxh415_configure_spi();
#elif defined(CONFIG_ST40_STXH416)
stxh416_configure_spi();
#endif /* CONFIG_ST40_STXH415/CONFIG_ST40_STXH416 */
#endif /* CONFIG_SOFT_SPI */
return 0;
}
| Blagus/STB8000-U-Boot | board/st/b2000/b2000.c | C | gpl-2.0 | 10,205 |
/* $Id: mpnotification-r0drv-solaris.c $ */
/** @file
* IPRT - Multiprocessor Event Notifications, Ring-0 Driver, Solaris.
*/
/*
* Copyright (C) 2008-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*********************************************************************************************************************************
* Header Files *
*********************************************************************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/err.h>
#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include "r0drv/mp-r0drv.h"
/*********************************************************************************************************************************
* Global Variables *
*********************************************************************************************************************************/
/** Whether CPUs are being watched or not. */
static volatile bool g_fSolCpuWatch = false;
/** Set of online cpus that is maintained by the MP callback.
 * This avoids locking issues when querying the set from the kernel and
 * eliminates any uncertainty regarding the online status during the
 * callback. */
RTCPUSET g_rtMpSolCpuSet;
/**
* Internal solaris representation for watching CPUs.
*/
typedef struct RTMPSOLWATCHCPUS
{
/** Function pointer to Mp worker. */
PFNRTMPWORKER pfnWorker;
/** Argument to pass to the Mp worker. */
void *pvArg;
} RTMPSOLWATCHCPUS;
typedef RTMPSOLWATCHCPUS *PRTMPSOLWATCHCPUS;
/**
* Solaris callback function for Mp event notification.
*
* @returns Solaris error code.
* @param CpuState The current event/state of the CPU.
* @param iCpu Which CPU is this event for.
* @param pvArg Ignored.
*
* @remarks This function assumes index == RTCPUID.
 *          We may -not- be firing on the CPU that is going online/offline,
 *          and we may be called with preemption enabled.
*/
static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
{
RTMPEVENT enmMpEvent;
/*
* Update our CPU set structures first regardless of whether we've been
* scheduled on the right CPU or not, this is just atomic accounting.
*/
if (CpuState == CPU_ON)
{
enmMpEvent = RTMPEVENT_ONLINE;
RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
}
else if (CpuState == CPU_OFF)
{
enmMpEvent = RTMPEVENT_OFFLINE;
RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
}
else
return 0;
rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
NOREF(pvArg);
return 0;
}
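/**
 * Registers the Solaris CPU setup callback and seeds the online CPU set.
 *
 * @returns VINF_SUCCESS on success, VERR_WRONG_ORDER if already initialized.
 */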
DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
{
if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
return VERR_WRONG_ORDER;
/*
 * Register the callback, building the online cpu set as we do so.
*/
RTCpuSetEmpty(&g_rtMpSolCpuSet);
mutex_enter(&cpu_lock);
register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
for (int i = 0; i < (int)RTMpGetCount(); ++i)
if (cpu_is_online(cpu[i]))
rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);
ASMAtomicWriteBool(&g_fSolCpuWatch, true);
mutex_exit(&cpu_lock);
return VINF_SUCCESS;
}
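/**
 * Unregisters the Solaris CPU setup callback again, if it was installed.
 */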
DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
{
if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
{
mutex_enter(&cpu_lock);
unregister_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
ASMAtomicWriteBool(&g_fSolCpuWatch, false);
mutex_exit(&cpu_lock);
}
}
| miguelinux/vbox | src/VBox/Runtime/r0drv/solaris/mpnotification-r0drv-solaris.c | C | gpl-2.0 | 4,766 |
/*
* mms_ts.c - Touchscreen driver for Melfas MMS-series touch controllers
*
* Copyright (C) 2011 Google Inc.
* Author: Dima Zavin <dima@android.com>
* Simon Wilson <simonwilson@google.com>
*
* ISP reflashing code based on original code from Melfas.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#define DEBUG
/* #define VERBOSE_DEBUG */
/*#define SEC_TSP_DEBUG*/
/* #define SEC_TSP_VERBOSE_DEBUG */
/* #define FORCE_FW_FLASH */
/* #define FORCE_FW_PASS */
/* #define ESD_DEBUG */
#define SEC_TSP_FACTORY_TEST
#define SEC_TSP_FW_UPDATE
#define TSP_BUF_SIZE 1024
#define FAIL -1
#include <linux/delay.h>
#include <linux/earlysuspend.h>
#include <linux/firmware.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <mach/gpio.h>
#include <linux/uaccess.h>
#include <mach/cpufreq.h>
#include <mach/dev.h>
#include <linux/platform_data/mms_ts.h>
#include <asm/unaligned.h>
#define MAX_FINGERS 10
#define MAX_WIDTH 30
#define MAX_PRESSURE 255
#define MAX_ANGLE 90
#define MIN_ANGLE -90
/* Registers */
#define MMS_MODE_CONTROL 0x01
#define MMS_XYRES_HI 0x02
#define MMS_XRES_LO 0x03
#define MMS_YRES_LO 0x04
#define MMS_INPUT_EVENT_PKT_SZ 0x0F
#define MMS_INPUT_EVENT0 0x10
#define FINGER_EVENT_SZ 8
#define MMS_TSP_REVISION 0xF0
#define MMS_HW_REVISION 0xF1
#define MMS_COMPAT_GROUP 0xF2
#define MMS_FW_VERSION 0xF3
enum {
ISP_MODE_FLASH_ERASE = 0x59F3,
ISP_MODE_FLASH_WRITE = 0x62CD,
ISP_MODE_FLASH_READ = 0x6AC9,
};
/* each address addresses 4-byte words */
#define ISP_MAX_FW_SIZE (0x1F00 * 4)
#define ISP_IC_INFO_ADDR 0x1F00
#ifdef SEC_TSP_FW_UPDATE
#define WORD_SIZE 4
#define ISC_PKT_SIZE 1029
#define ISC_PKT_DATA_SIZE 1024
#define ISC_PKT_HEADER_SIZE 3
#define ISC_PKT_NUM 31
#define ISC_ENTER_ISC_CMD 0x5F
#define ISC_ENTER_ISC_DATA 0x01
#define ISC_CMD 0xAE
#define ISC_ENTER_UPDATE_DATA 0x55
#define ISC_ENTER_UPDATE_DATA_LEN 9
#define ISC_DATA_WRITE_SUB_CMD 0xF1
#define ISC_EXIT_ISC_SUB_CMD 0x0F
#define ISC_EXIT_ISC_SUB_CMD2 0xF0
#define ISC_CHECK_STATUS_CMD 0xAF
#define ISC_CONFIRM_CRC 0x03
#define ISC_DEFAULT_CRC 0xFFFF
#endif
#ifdef SEC_TSP_FACTORY_TEST
#define TX_NUM 26
#define RX_NUM 14
#define NODE_NUM 364 /* 26x14 */
/* VSC(Vender Specific Command) */
#define MMS_VSC_CMD 0xB0 /* vendor specific command */
#define MMS_VSC_MODE 0x1A /* mode of vendor */
#define MMS_VSC_CMD_ENTER 0X01
#define MMS_VSC_CMD_CM_DELTA 0X02
#define MMS_VSC_CMD_CM_ABS 0X03
#define MMS_VSC_CMD_EXIT 0X05
#define MMS_VSC_CMD_INTENSITY 0X04
#define MMS_VSC_CMD_RAW 0X06
#define MMS_VSC_CMD_REFER 0X07
#define TSP_CMD_STR_LEN 32
#define TSP_CMD_RESULT_STR_LEN 512
#define TSP_CMD_PARAM_NUM 8
#endif /* SEC_TSP_FACTORY_TEST */
/* Touch booster */
#if defined(CONFIG_EXYNOS4_CPUFREQ) &&\
defined(CONFIG_BUSFREQ_OPP)
#define TOUCH_BOOSTER 1
#define TOUCH_BOOSTER_OFF_TIME 100
#define TOUCH_BOOSTER_CHG_TIME 200
#else
#define TOUCH_BOOSTER 0
#endif
struct device *sec_touchscreen;
static struct device *bus_dev;
int touch_is_pressed = 0;
#define ISC_DL_MODE 1
/* 4.8" OCTA LCD */
#define FW_VERSION_4_8 0xBD
#define MAX_FW_PATH 255
#define TSP_FW_FILENAME "melfas_fw.bin"
#if ISC_DL_MODE /* ISC_DL_MODE start */
/*
* Default configuration of ISC mode
*/
#define DEFAULT_SLAVE_ADDR 0x48
#define SECTION_NUM 4
#define SECTION_NAME_LEN 5
#define PAGE_HEADER 3
#define PAGE_DATA 1024
#define PAGE_TAIL 2
#define PACKET_SIZE (PAGE_HEADER + PAGE_DATA + PAGE_TAIL)
#define TS_WRITE_REGS_LEN 1030
#define TIMEOUT_CNT 10
#define STRING_BUF_LEN 100
/* State Registers */
#define MIP_ADDR_INPUT_INFORMATION 0x01
#define ISC_ADDR_VERSION 0xE1
#define ISC_ADDR_SECTION_PAGE_INFO 0xE5
/* Config Update Commands */
#define ISC_CMD_ENTER_ISC 0x5F
#define ISC_CMD_ENTER_ISC_PARA1 0x01
#define ISC_CMD_UPDATE_MODE 0xAE
#define ISC_SUBCMD_ENTER_UPDATE 0x55
#define ISC_SUBCMD_DATA_WRITE 0XF1
#define ISC_SUBCMD_LEAVE_UPDATE_PARA1 0x0F
#define ISC_SUBCMD_LEAVE_UPDATE_PARA2 0xF0
#define ISC_CMD_CONFIRM_STATUS 0xAF
#define ISC_STATUS_UPDATE_MODE 0x01
#define ISC_STATUS_CRC_CHECK_SUCCESS 0x03
#define ISC_CHAR_2_BCD(num) (((num/10)<<4) + (num%10))
#define ISC_MAX(x, y) (((x) > (y)) ? (x) : (y))
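/* ISC_CHAR_2_BCD packs a decimal value into BCD, e.g. ISC_CHAR_2_BCD(34) == 0x34;
 * it is used below to encode the version numbers parsed from the .fw text headers. */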
static const char section_name[SECTION_NUM][SECTION_NAME_LEN] = {
"BOOT", "CORE", "PRIV", "PUBL"
};
static const unsigned char crc0_buf[31] = {
0x1D, 0x2C, 0x05, 0x34, 0x95, 0xA4, 0x8D, 0xBC,
0x59, 0x68, 0x41, 0x70, 0xD1, 0xE0, 0xC9, 0xF8,
0x3F, 0x0E, 0x27, 0x16, 0xB7, 0x86, 0xAF, 0x9E,
0x7B, 0x4A, 0x63, 0x52, 0xF3, 0xC2, 0xEB
};
static const unsigned char crc1_buf[31] = {
0x1E, 0x9C, 0xDF, 0x5D, 0x76, 0xF4, 0xB7, 0x35,
0x2A, 0xA8, 0xEB, 0x69, 0x42, 0xC0, 0x83, 0x01,
0x04, 0x86, 0xC5, 0x47, 0x6C, 0xEE, 0xAD, 0x2F,
0x30, 0xB2, 0xF1, 0x73, 0x58, 0xDA, 0x99
};
static tISCFWInfo_t mbin_info[SECTION_NUM];
static tISCFWInfo_t ts_info[SECTION_NUM]; /* read F/W version from IC */
static bool section_update_flag[SECTION_NUM];
const struct firmware *fw_mbin[SECTION_NUM];
static unsigned char g_wr_buf[1024 + 3 + 2];
#endif
enum fw_flash_mode {
ISP_FLASH,
ISC_FLASH,
};
enum {
BUILT_IN = 0,
UMS,
REQ_FW,
};
struct tsp_callbacks {
void (*inform_charger)(struct tsp_callbacks *tsp_cb, bool mode);
};
struct mms_ts_info {
struct i2c_client *client;
struct input_dev *input_dev;
char phys[32];
int max_x;
int max_y;
bool invert_x;
bool invert_y;
const u8 *config_fw_version;
int irq;
int (*power) (bool on);
struct melfas_tsi_platform_data *pdata;
struct early_suspend early_suspend;
/* protects the enabled flag */
struct mutex lock;
bool enabled;
void (*register_cb)(void *);
struct tsp_callbacks callbacks;
bool ta_status;
bool noise_mode;
unsigned char finger_state[MAX_FINGERS];
#if defined(SEC_TSP_FW_UPDATE)
u8 fw_update_state;
#endif
u8 fw_ic_ver;
enum fw_flash_mode fw_flash_mode;
#if TOUCH_BOOSTER
struct delayed_work work_dvfs_off;
struct delayed_work work_dvfs_chg;
bool dvfs_lock_status;
int cpufreq_level;
struct mutex dvfs_lock;
#endif
#if defined(SEC_TSP_FACTORY_TEST)
struct list_head cmd_list_head;
u8 cmd_state;
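	/* 2: OK, 3: failed, 4: not supported -- as used by the factory-test
	 * command handlers below; other states are not set in this file. */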
char cmd[TSP_CMD_STR_LEN];
int cmd_param[TSP_CMD_PARAM_NUM];
char cmd_result[TSP_CMD_RESULT_STR_LEN];
struct mutex cmd_lock;
bool cmd_is_running;
unsigned int reference[NODE_NUM];
unsigned int raw[NODE_NUM]; /* CM_ABS */
unsigned int inspection[NODE_NUM];/* CM_DELTA */
unsigned int intensity[NODE_NUM];
bool ft_flag;
#endif /* SEC_TSP_FACTORY_TEST */
};
struct mms_fw_image {
__le32 hdr_len;
__le32 data_len;
__le32 fw_ver;
__le32 hdr_ver;
u8 data[0];
} __packed;
#ifdef CONFIG_HAS_EARLYSUSPEND
static void mms_ts_early_suspend(struct early_suspend *h);
static void mms_ts_late_resume(struct early_suspend *h);
#endif
#if TOUCH_BOOSTER
static bool dvfs_lock_status = false;
static bool press_status = false;
#endif
#if defined(SEC_TSP_FACTORY_TEST)
#define TSP_CMD(name, func) .cmd_name = name, .cmd_func = func
struct tsp_cmd {
struct list_head list;
const char *cmd_name;
void (*cmd_func)(void *device_data);
};
static void fw_update(void *device_data);
static void get_fw_ver_bin(void *device_data);
static void get_fw_ver_ic(void *device_data);
static void get_config_ver(void *device_data);
static void get_threshold(void *device_data);
static void module_off_master(void *device_data);
static void module_on_master(void *device_data);
static void module_off_slave(void *device_data);
static void module_on_slave(void *device_data);
static void get_chip_vendor(void *device_data);
static void get_chip_name(void *device_data);
static void get_reference(void *device_data);
static void get_cm_abs(void *device_data);
static void get_cm_delta(void *device_data);
static void get_intensity(void *device_data);
static void get_x_num(void *device_data);
static void get_y_num(void *device_data);
static void run_reference_read(void *device_data);
static void run_cm_abs_read(void *device_data);
static void run_cm_delta_read(void *device_data);
static void run_intensity_read(void *device_data);
static void not_support_cmd(void *device_data);
struct tsp_cmd tsp_cmds[] = {
{TSP_CMD("fw_update", fw_update),},
{TSP_CMD("get_fw_ver_bin", get_fw_ver_bin),},
{TSP_CMD("get_fw_ver_ic", get_fw_ver_ic),},
{TSP_CMD("get_config_ver", get_config_ver),},
{TSP_CMD("get_threshold", get_threshold),},
/* {TSP_CMD("module_off_master", module_off_master),},
{TSP_CMD("module_on_master", module_on_master),},
{TSP_CMD("module_off_slave", not_support_cmd),},
{TSP_CMD("module_on_slave", not_support_cmd),}, */
{TSP_CMD("get_chip_vendor", get_chip_vendor),},
{TSP_CMD("get_chip_name", get_chip_name),},
{TSP_CMD("get_x_num", get_x_num),},
{TSP_CMD("get_y_num", get_y_num),},
{TSP_CMD("get_reference", get_reference),},
{TSP_CMD("get_cm_abs", get_cm_abs),},
{TSP_CMD("get_cm_delta", get_cm_delta),},
{TSP_CMD("get_intensity", get_intensity),},
{TSP_CMD("run_reference_read", run_reference_read),},
{TSP_CMD("run_cm_abs_read", run_cm_abs_read),},
{TSP_CMD("run_cm_delta_read", run_cm_delta_read),},
{TSP_CMD("run_intensity_read", run_intensity_read),},
{TSP_CMD("not_support_cmd", not_support_cmd),},
};
#endif
#if TOUCH_BOOSTER
static void change_dvfs_lock(struct work_struct *work)
{
struct mms_ts_info *info = container_of(work,
struct mms_ts_info, work_dvfs_chg.work);
int ret;
mutex_lock(&info->dvfs_lock);
ret = dev_lock(bus_dev, sec_touchscreen, 267160); /* 266 Mhz setting */
if (ret < 0)
pr_err("%s: dev change bud lock failed(%d)\n",\
__func__, __LINE__);
else
pr_info("[TSP] change_dvfs_lock");
mutex_unlock(&info->dvfs_lock);
}
static void set_dvfs_off(struct work_struct *work)
{
struct mms_ts_info *info = container_of(work,
struct mms_ts_info, work_dvfs_off.work);
int ret;
mutex_lock(&info->dvfs_lock);
ret = dev_unlock(bus_dev, sec_touchscreen);
if (ret < 0)
pr_err("%s: dev unlock failed(%d)\n",
__func__, __LINE__);
exynos_cpufreq_lock_free(DVFS_LOCK_ID_TSP);
info->dvfs_lock_status = false;
pr_info("[TSP] DVFS Off!");
mutex_unlock(&info->dvfs_lock);
}
static void set_dvfs_lock(struct mms_ts_info *info, uint32_t on)
{
int ret;
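	/*
	 * on == 1: touch pressed  -> take the bus/CPU locks, then relax the
	 *          bus lock after TOUCH_BOOSTER_CHG_TIME.
	 * on == 0: touch released -> schedule the lock release after
	 *          TOUCH_BOOSTER_OFF_TIME.
	 * on == 2: force an immediate release (used when all fingers are
	 *          released) -- behaviour inferred from the callers below.
	 */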
mutex_lock(&info->dvfs_lock);
if (info->cpufreq_level <= 0) {
ret = exynos_cpufreq_get_level(800000, &info->cpufreq_level);
		if (ret < 0) {
			pr_err("[TSP] exynos_cpufreq_get_level error");
			goto out;
		}
}
if (on == 0) {
if (info->dvfs_lock_status) {
cancel_delayed_work(&info->work_dvfs_chg);
schedule_delayed_work(&info->work_dvfs_off,
msecs_to_jiffies(TOUCH_BOOSTER_OFF_TIME));
}
} else if (on == 1) {
cancel_delayed_work(&info->work_dvfs_off);
if (!info->dvfs_lock_status) {
ret = dev_lock(bus_dev, sec_touchscreen, 400200);
if (ret < 0) {
pr_err("%s: dev lock failed(%d)\n",\
__func__, __LINE__);
}
ret = exynos_cpufreq_lock(DVFS_LOCK_ID_TSP,
info->cpufreq_level);
if (ret < 0)
pr_err("%s: cpu lock failed(%d)\n",\
__func__, __LINE__);
schedule_delayed_work(&info->work_dvfs_chg,
msecs_to_jiffies(TOUCH_BOOSTER_CHG_TIME));
info->dvfs_lock_status = true;
pr_info("[TSP] DVFS On![%d]", info->cpufreq_level);
}
} else if (on == 2) {
cancel_delayed_work(&info->work_dvfs_off);
cancel_delayed_work(&info->work_dvfs_chg);
schedule_work(&info->work_dvfs_off.work);
}
out:
mutex_unlock(&info->dvfs_lock);
}
#endif
static inline void mms_pwr_on_reset(struct mms_ts_info *info)
{
struct i2c_adapter *adapter = to_i2c_adapter(info->client->dev.parent);
if (!info->pdata->mux_fw_flash) {
dev_info(&info->client->dev,
"missing platform data, can't do power-on-reset\n");
return;
}
i2c_lock_adapter(adapter);
info->pdata->mux_fw_flash(true);
info->pdata->power(false);
gpio_direction_output(info->pdata->gpio_sda, 0);
gpio_direction_output(info->pdata->gpio_scl, 0);
gpio_direction_output(info->pdata->gpio_int, 0);
msleep(50);
info->pdata->power(true);
msleep(200);
info->pdata->mux_fw_flash(false);
i2c_unlock_adapter(adapter);
/* TODO: Seems long enough for the firmware to boot.
* Find the right value */
msleep(250);
}
static void release_all_fingers(struct mms_ts_info *info)
{
struct i2c_client *client = info->client;
int i;
	pr_debug("[TSP] %s\n", __func__);
for (i = 0; i < MAX_FINGERS; i++) {
if (info->finger_state[i] == 1) {
dev_notice(&client->dev, "finger %d up(force)\n", i);
}
info->finger_state[i] = 0;
input_mt_slot(info->input_dev, i);
input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER,
false);
}
input_sync(info->input_dev);
#if TOUCH_BOOSTER
set_dvfs_lock(info, 2);
pr_info("[TSP] dvfs_lock free.\n ");
#endif
}
static void mms_set_noise_mode(struct mms_ts_info *info)
{
struct i2c_client *client = info->client;
if (!(info->noise_mode && info->enabled))
return;
dev_notice(&client->dev, "%s\n", __func__);
if (info->ta_status) {
dev_notice(&client->dev, "noise_mode & TA connect!!!\n");
i2c_smbus_write_byte_data(info->client, 0x30, 0x1);
} else {
dev_notice(&client->dev, "noise_mode & TA disconnect!!!\n");
i2c_smbus_write_byte_data(info->client, 0x30, 0x2);
}
}
static void reset_mms_ts(struct mms_ts_info *info)
{
struct i2c_client *client = info->client;
if (info->enabled == false)
return;
dev_notice(&client->dev, "%s++\n", __func__);
disable_irq_nosync(info->irq);
info->enabled = false;
touch_is_pressed = 0;
release_all_fingers(info);
mms_pwr_on_reset(info);
enable_irq(info->irq);
info->enabled = true;
if (info->ta_status) {
dev_notice(&client->dev, "TA connect!!!\n");
i2c_smbus_write_byte_data(info->client, 0x33, 0x1);
} else {
dev_notice(&client->dev, "TA disconnect!!!\n");
i2c_smbus_write_byte_data(info->client, 0x33, 0x2);
}
mms_set_noise_mode(info);
dev_notice(&client->dev, "%s--\n", __func__);
}
static void melfas_ta_cb(struct tsp_callbacks *cb, bool ta_status)
{
struct mms_ts_info *info =
container_of(cb, struct mms_ts_info, callbacks);
struct i2c_client *client = info->client;
dev_notice(&client->dev, "%s\n", __func__);
info->ta_status = ta_status;
if (info->enabled) {
if (info->ta_status) {
dev_notice(&client->dev, "TA connect!!!\n");
i2c_smbus_write_byte_data(info->client, 0x33, 0x1);
} else {
dev_notice(&client->dev, "TA disconnect!!!\n");
i2c_smbus_write_byte_data(info->client, 0x33, 0x2);
}
mms_set_noise_mode(info);
}
}
static irqreturn_t mms_ts_interrupt(int irq, void *dev_id)
{
struct mms_ts_info *info = dev_id;
struct i2c_client *client = info->client;
u8 buf[MAX_FINGERS * FINGER_EVENT_SZ] = { 0 };
int ret;
int i;
int sz;
u8 reg = MMS_INPUT_EVENT0;
struct i2c_msg msg[] = {
{
.addr = client->addr,
.flags = 0,
			.buf = &reg,
.len = 1,
}, {
.addr = client->addr,
.flags = I2C_M_RD,
.buf = buf,
},
};
sz = i2c_smbus_read_byte_data(client, MMS_INPUT_EVENT_PKT_SZ);
if (sz < 0) {
dev_err(&client->dev, "%s bytes=%d\n", __func__, sz);
for (i = 0; i < 50; i++) {
sz = i2c_smbus_read_byte_data(client,
MMS_INPUT_EVENT_PKT_SZ);
if (sz > 0)
break;
}
if (i == 50) {
dev_dbg(&client->dev, "i2c failed... reset!!\n");
reset_mms_ts(info);
goto out;
}
}
/* BUG_ON(sz > MAX_FINGERS*FINGER_EVENT_SZ); */
if (sz == 0)
goto out;
if (sz > MAX_FINGERS*FINGER_EVENT_SZ) {
dev_err(&client->dev, "[TSP] abnormal data inputed.\n");
goto out;
}
msg[1].len = sz;
ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
if (ret != ARRAY_SIZE(msg)) {
dev_err(&client->dev,
"failed to read %d bytes of touch data (%d)\n",
sz, ret);
goto out;
}
#if defined(VERBOSE_DEBUG)
print_hex_dump(KERN_DEBUG, "mms_ts raw: ",
DUMP_PREFIX_OFFSET, 32, 1, buf, sz, false);
#endif
if (buf[0] == 0x0F) { /* ESD */
dev_dbg(&client->dev, "ESD DETECT.... reset!!\n");
reset_mms_ts(info);
goto out;
}
if (buf[0] == 0x0E) { /* NOISE MODE */
dev_dbg(&client->dev, "[TSP] noise mode enter!!\n");
info->noise_mode = 1 ;
mms_set_noise_mode(info);
goto out;
}
for (i = 0; i < sz; i += FINGER_EVENT_SZ) {
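		/*
		 * Per-finger event layout (FINGER_EVENT_SZ bytes), as decoded
		 * below: byte0 holds the touch-state bit (0x80), the palm bit
		 * (0x10) and (id + 1) in the low nibble; byte1 carries the x/y
		 * high nibbles, byte2/3 the x/y low bytes; byte4 is the width,
		 * byte5 the signed angle, byte6/7 the touch major/minor.
		 */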
u8 *tmp = &buf[i];
int id = (tmp[0] & 0xf) - 1;
int x = tmp[2] | ((tmp[1] & 0xf) << 8);
int y = tmp[3] | (((tmp[1] >> 4) & 0xf) << 8);
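		/* tmp[5] encodes the touch angle as a signed byte: values of
		 * 127 and above are mapped to negative angles below. */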
int angle = (tmp[5] >= 127) ? (-(256 - tmp[5])) : tmp[5];
int palm = (tmp[0] & 0x10) >> 4;
if (info->invert_x) {
x = info->max_x - x;
if (x < 0)
x = 0;
}
if (info->invert_y) {
y = info->max_y - y;
if (y < 0)
y = 0;
}
if (id >= MAX_FINGERS) {
dev_notice(&client->dev, \
"finger id error [%d]\n", id);
reset_mms_ts(info);
goto out;
}
if ((tmp[0] & 0x80) == 0) {
#if defined(SEC_TSP_DEBUG)
dev_dbg(&client->dev,
"finger id[%d]: x=%d y=%d p=%d w=%d major=%d minor=%d angle=%d palm=%d\n",
id, x, y, tmp[5], tmp[4], tmp[6], tmp[7]
, angle, palm);
#else
if (info->finger_state[id] != 0) {
dev_notice(&client->dev,
"finger [%d] up, palm %d\n", id, palm);
}
#endif
input_mt_slot(info->input_dev, id);
input_mt_report_slot_state(info->input_dev,
MT_TOOL_FINGER, false);
info->finger_state[id] = 0;
continue;
}
input_mt_slot(info->input_dev, id);
input_mt_report_slot_state(info->input_dev,
MT_TOOL_FINGER, true);
input_report_abs(info->input_dev, ABS_MT_WIDTH_MAJOR, tmp[4]);
input_report_abs(info->input_dev, ABS_MT_POSITION_X, x);
input_report_abs(info->input_dev, ABS_MT_POSITION_Y, y);
input_report_abs(info->input_dev, ABS_MT_TOUCH_MAJOR, tmp[6]);
input_report_abs(info->input_dev, ABS_MT_TOUCH_MINOR, tmp[7]);
input_report_abs(info->input_dev, ABS_MT_ANGLE, angle);
input_report_abs(info->input_dev, ABS_MT_PALM, palm);
#if defined(SEC_TSP_DEBUG)
if (info->finger_state[id] == 0) {
info->finger_state[id] = 1;
dev_dbg(&client->dev,
"finger id[%d]: x=%d y=%d w=%d major=%d minor=%d angle=%d palm=%d\n",
id, x, y, tmp[4], tmp[6], tmp[7]
, angle, palm);
			if (FINGER_EVENT_SZ == 10)
				dev_dbg(&client->dev, \
					"pressure = %d\n", tmp[8]);
}
#else
if (info->finger_state[id] == 0) {
info->finger_state[id] = 1;
dev_notice(&client->dev,
"finger [%d] down, palm %d\n", id, palm);
}
#endif
}
input_sync(info->input_dev);
touch_is_pressed = 0;
for (i = 0; i < MAX_FINGERS; i++) {
if (info->finger_state[i] == 1)
touch_is_pressed++;
}
#if TOUCH_BOOSTER
set_dvfs_lock(info, !!touch_is_pressed);
#endif
out:
return IRQ_HANDLED;
}
int get_tsp_status(void)
{
return touch_is_pressed;
}
EXPORT_SYMBOL(get_tsp_status);
#if ISC_DL_MODE
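/* Register read helper: writes the one-byte register address in one i2c
 * transfer, then reads 'length' bytes back in a second transfer. */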
static int mms100_i2c_read(struct i2c_client *client,
u16 addr, u16 length, u8 *value)
{
struct i2c_adapter *adapter = client->adapter;
struct i2c_msg msg;
int ret = -1;
msg.addr = client->addr;
msg.flags = 0x00;
msg.len = 1;
msg.buf = (u8 *) &addr;
ret = i2c_transfer(adapter, &msg, 1);
if (ret >= 0) {
msg.addr = client->addr;
msg.flags = I2C_M_RD;
msg.len = length;
msg.buf = (u8 *) value;
ret = i2c_transfer(adapter, &msg, 1);
}
if (ret < 0)
pr_err("[TSP] : read error : [%d]", ret);
return ret;
}
static int mms100_reset(struct mms_ts_info *info)
{
info->pdata->power(false);
msleep(30);
info->pdata->power(true);
msleep(300);
return ISC_SUCCESS;
}
/*
static int mms100_check_operating_mode(struct i2c_client *_client,
const int _error_code)
{
int ret;
unsigned char rd_buf = 0x00;
unsigned char count = 0;
if(_client == NULL)
pr_err("[TSP ISC] _client is null");
ret = mms100_i2c_read(_client, ISC_ADDR_VERSION, 1, &rd_buf);
if (ret<0) {
pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
__func__, __LINE__, ret);
return _error_code;
}
return ISC_SUCCESS;
}
*/
static int mms100_get_version_info(struct i2c_client *_client)
{
int i, ret;
unsigned char rd_buf[8];
	/* config version burst read (core, private, public) */
ret = mms100_i2c_read(_client, ISC_ADDR_VERSION, 4, rd_buf);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
for (i = 0; i < SECTION_NUM; i++)
ts_info[i].version = rd_buf[i];
ts_info[SEC_CORE].compatible_version =
ts_info[SEC_BOOTLOADER].version;
ts_info[SEC_PRIVATE_CONFIG].compatible_version =
ts_info[SEC_PUBLIC_CONFIG].compatible_version =
ts_info[SEC_CORE].version;
ret = mms100_i2c_read(_client, ISC_ADDR_SECTION_PAGE_INFO, 8, rd_buf);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
for (i = 0; i < SECTION_NUM; i++) {
ts_info[i].start_addr = rd_buf[i];
ts_info[i].end_addr = rd_buf[i + SECTION_NUM];
}
for (i = 0; i < SECTION_NUM; i++) {
pr_info("TS : Section(%d) version: 0x%02X\n",
i, ts_info[i].version);
pr_info("TS : Section(%d) Start Address: 0x%02X\n",
i, ts_info[i].start_addr);
pr_info("TS : Section(%d) End Address: 0x%02X\n",
i, ts_info[i].end_addr);
pr_info("TS : Section(%d) Compatibility: 0x%02X\n",
i, ts_info[i].compatible_version);
}
return ISC_SUCCESS;
}
static int mms100_seek_section_info(void)
{
int i;
char str_buf[STRING_BUF_LEN];
char name_buf[SECTION_NAME_LEN];
int version;
int page_num;
const unsigned char *buf;
int next_ptr;
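	/*
	 * Each .fw blob starts with a plain-text header; scan it token by
	 * token for the SECTION_NAME, SECTION_VERSION, START_PAGE_ADDR,
	 * END_PAGE_ADDR and COMPATIBLE_VERSION keys, then stop at "[Binary]"
	 * where the packet data begins.
	 */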
for (i = 0; i < SECTION_NUM; i++) {
if (fw_mbin[i] == NULL) {
buf = NULL;
pr_info("[TSP ISC] fw_mbin[%d]->data is NULL", i);
} else {
buf = fw_mbin[i]->data;
}
if (buf == NULL) {
mbin_info[i].version = ts_info[i].version;
mbin_info[i].compatible_version =
ts_info[i].compatible_version;
mbin_info[i].start_addr = ts_info[i].start_addr;
mbin_info[i].end_addr = ts_info[i].end_addr;
} else {
next_ptr = 0;
do {
sscanf(buf + next_ptr, "%s", str_buf);
next_ptr += strlen(str_buf) + 1;
} while (!strstr(str_buf, "SECTION_NAME"));
sscanf(buf + next_ptr, "%s%s", str_buf, name_buf);
if (strncmp(section_name[i], name_buf,
SECTION_NAME_LEN))
return ISC_FILE_FORMAT_ERROR;
do {
sscanf(buf + next_ptr, "%s", str_buf);
next_ptr += strlen(str_buf) + 1;
} while (!strstr(str_buf, "SECTION_VERSION"));
sscanf(buf + next_ptr, "%s%d", str_buf, &version);
mbin_info[i].version = ISC_CHAR_2_BCD(version);
do {
sscanf(buf + next_ptr, "%s", str_buf);
next_ptr += strlen(str_buf) + 1;
} while (!strstr(str_buf, "START_PAGE_ADDR"));
sscanf(buf + next_ptr, "%s%d", str_buf, &page_num);
mbin_info[i].start_addr = page_num;
do {
sscanf(buf + next_ptr, "%s", str_buf);
next_ptr += strlen(str_buf) + 1;
} while (!strstr(str_buf, "END_PAGE_ADDR"));
sscanf(buf + next_ptr, "%s%d", str_buf, &page_num);
mbin_info[i].end_addr = page_num;
do {
sscanf(buf + next_ptr, "%s", str_buf);
next_ptr += strlen(str_buf) + 1;
} while (!strstr(str_buf, "COMPATIBLE_VERSION"));
sscanf(buf + next_ptr, "%s%d", str_buf, &version);
mbin_info[i].compatible_version =
ISC_CHAR_2_BCD(version);
do {
sscanf(buf + next_ptr, "%s", str_buf);
next_ptr += strlen(str_buf) + 1;
} while (!strstr(str_buf, "[Binary]"));
if (mbin_info[i].version == 0xFF)
return ISC_FILE_FORMAT_ERROR;
}
}
for (i = 0; i < SECTION_NUM; i++) {
pr_info("[TSP ISC] MBin : Section(%d) Version: 0x%02X\n",
i, mbin_info[i].version);
pr_info("[TSP ISC] MBin : Section(%d) Start Address: 0x%02X\n",
i, mbin_info[i].start_addr);
pr_info("[TSP ISC] MBin : Section(%d) End Address: 0x%02X\n",
i, mbin_info[i].end_addr);
pr_info("[TSP ISC] MBin : Section(%d) Compatibility: 0x%02X\n",
i, mbin_info[i].compatible_version);
}
return ISC_SUCCESS;
}
static eISCRet_t mms100_compare_version_info(struct i2c_client *_client)
{
int i, ret;
unsigned char expected_compatibility[SECTION_NUM];
if (mms100_get_version_info(_client) != ISC_SUCCESS)
return ISC_I2C_ERROR;
ret = mms100_seek_section_info();
/* Check update areas , 0 : bootloader 1: core 2: private 3: public */
for (i = 0; i < SECTION_NUM; i++) {
if ((mbin_info[i].version == 0) ||
(mbin_info[i].version != ts_info[i].version)) {
section_update_flag[i] = true;
pr_info("[TSP ISC] [%d] section will be updated!", i);
}
}
section_update_flag[0] = false;
pr_info("[TSP ISC] [%d] [%d] [%d]", section_update_flag[1],
section_update_flag[2], section_update_flag[3]);
if (section_update_flag[SEC_BOOTLOADER]) {
expected_compatibility[SEC_CORE] =
mbin_info[SEC_BOOTLOADER].version;
} else {
expected_compatibility[SEC_CORE] =
ts_info[SEC_BOOTLOADER].version;
}
if (section_update_flag[SEC_CORE]) {
expected_compatibility[SEC_PUBLIC_CONFIG] =
expected_compatibility[SEC_PRIVATE_CONFIG] =
mbin_info[SEC_CORE].version;
} else {
expected_compatibility[SEC_PUBLIC_CONFIG] =
expected_compatibility[SEC_PRIVATE_CONFIG] =
ts_info[SEC_CORE].version;
}
for (i = SEC_CORE; i < SEC_PUBLIC_CONFIG; i++) {
if (section_update_flag[i]) {
pr_info("[TSP ISC] section_update_flag(%d), 0x%02x, 0x%02x\n",
i, expected_compatibility[i],
mbin_info[i].compatible_version);
if (expected_compatibility[i] !=
mbin_info[i].compatible_version)
return ISC_COMPATIVILITY_ERROR;
} else {
pr_info("[TSP ISC] !section_update_flag(%d), 0x%02x, 0x%02x\n",
i, expected_compatibility[i],
ts_info[i].compatible_version);
if (expected_compatibility[i] !=
ts_info[i].compatible_version)
return ISC_COMPATIVILITY_ERROR;
}
}
return ISC_SUCCESS;
}
static int mms100_enter_ISC_mode(struct i2c_client *_client)
{
int ret;
unsigned char wr_buf[2];
pr_info("[TSP ISC] %s\n", __func__);
wr_buf[0] = ISC_CMD_ENTER_ISC;
wr_buf[1] = ISC_CMD_ENTER_ISC_PARA1;
ret = i2c_master_send(_client, wr_buf, 2);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c write fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
msleep(50);
return ISC_SUCCESS;
}
static int mms100_enter_config_update(struct i2c_client *_client)
{
int ret;
unsigned char wr_buf[10] = {0,};
unsigned char rd_buf;
wr_buf[0] = ISC_CMD_UPDATE_MODE;
wr_buf[1] = ISC_SUBCMD_ENTER_UPDATE;
ret = i2c_master_send(_client, wr_buf, 10);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c write fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
ret = mms100_i2c_read(_client, ISC_CMD_CONFIRM_STATUS, 1, &rd_buf);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
if (rd_buf != ISC_STATUS_UPDATE_MODE)
return ISC_UPDATE_MODE_ENTER_ERROR;
pr_info("[TSP ISC]End mms100_enter_config_update()\n");
return ISC_SUCCESS;
}
static int mms100_ISC_clear_page(struct i2c_client *_client,
unsigned char _page_addr)
{
int ret;
unsigned char rd_buf;
memset(&g_wr_buf[3], 0xFF, PAGE_DATA);
g_wr_buf[0] = ISC_CMD_UPDATE_MODE; /* command */
g_wr_buf[1] = ISC_SUBCMD_DATA_WRITE; /* sub_command */
g_wr_buf[2] = _page_addr;
g_wr_buf[PAGE_HEADER + PAGE_DATA] = crc0_buf[_page_addr];
g_wr_buf[PAGE_HEADER + PAGE_DATA + 1] = crc1_buf[_page_addr];
ret = i2c_master_send(_client, g_wr_buf, PACKET_SIZE);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c write fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
ret = mms100_i2c_read(_client, ISC_CMD_CONFIRM_STATUS, 1, &rd_buf);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
if (rd_buf != ISC_STATUS_CRC_CHECK_SUCCESS)
return ISC_UPDATE_MODE_ENTER_ERROR;
pr_info("[TSP ISC]End mms100_ISC_clear_page()\n");
return ISC_SUCCESS;
}
static int mms100_ISC_clear_validate_markers(struct i2c_client *_client)
{
int ret_msg;
int i, j;
bool is_matched_address;
for (i = SEC_CORE; i <= SEC_PUBLIC_CONFIG; i++) {
if (section_update_flag[i]) {
if (ts_info[i].end_addr <= 30 &&
ts_info[i].end_addr > 0) {
ret_msg = mms100_ISC_clear_page(_client,
ts_info[i].end_addr);
if (ret_msg != ISC_SUCCESS)
return ret_msg;
}
}
}
for (i = SEC_CORE; i <= SEC_PUBLIC_CONFIG; i++) {
if (section_update_flag[i]) {
is_matched_address = false;
for (j = SEC_CORE; j <= SEC_PUBLIC_CONFIG; j++) {
				if (mbin_info[i].end_addr ==
					ts_info[j].end_addr) {
is_matched_address = true;
break;
}
}
if (!is_matched_address) {
if (mbin_info[i].end_addr <= 30 &&
mbin_info[i].end_addr > 0) {
ret_msg = mms100_ISC_clear_page(_client,
mbin_info[i].end_addr);
if (ret_msg != ISC_SUCCESS)
return ret_msg;
}
}
}
}
return ISC_SUCCESS;
}
static int mms100_update_section_data(struct i2c_client *_client)
{
int i, ret, next_ptr;
unsigned char rd_buf;
const unsigned char *ptr_fw;
char str_buf[STRING_BUF_LEN];
int page_addr;
for (i = 0; i < SECTION_NUM; i++) {
if (section_update_flag[i]) {
pr_info("[TSP ISC] section data i2c flash : [%d]", i);
next_ptr = 0;
ptr_fw = fw_mbin[i]->data;
do {
sscanf(ptr_fw + next_ptr, "%s", str_buf);
next_ptr += strlen(str_buf) + 1;
} while (!strstr(str_buf, "[Binary]"));
ptr_fw = ptr_fw + next_ptr + 2;
for (page_addr = mbin_info[i].start_addr;
page_addr <= mbin_info[i].end_addr;
page_addr++) {
if (page_addr - mbin_info[i].start_addr > 0)
ptr_fw += PACKET_SIZE;
if ((ptr_fw[0] != ISC_CMD_UPDATE_MODE) ||
(ptr_fw[1] != ISC_SUBCMD_DATA_WRITE) ||
(ptr_fw[2] != page_addr))
return ISC_WRITE_BUFFER_ERROR;
ret = i2c_master_send(_client,
ptr_fw, PACKET_SIZE);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c write fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
ret = mms100_i2c_read(_client,
ISC_CMD_CONFIRM_STATUS, 1, &rd_buf);
if (ret < 0) {
pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
__func__, __LINE__, ret);
return ISC_I2C_ERROR;
}
if (rd_buf != ISC_STATUS_CRC_CHECK_SUCCESS)
return ISC_CRC_ERROR;
section_update_flag[i] = false;
}
}
}
pr_info("[TSP ISC]End mms100_update_section_data()\n");
return ISC_SUCCESS;
}
static eISCRet_t mms100_open_mbinary(struct i2c_client *_client)
{
	int ret = 0;
ret += request_firmware(&(fw_mbin[1]),\
"tsp_melfas/CORE.fw", &_client->dev);
ret += request_firmware(&(fw_mbin[2]),\
"tsp_melfas/PRIV.fw", &_client->dev);
ret += request_firmware(&(fw_mbin[3]),\
"tsp_melfas/PUBL.fw", &_client->dev);
if (!ret)
return ISC_SUCCESS;
else {
pr_info("[TSP ISC] request_firmware fail");
return ret;
}
}
static eISCRet_t mms100_close_mbinary(void)
{
int i;
for (i = 0; i < SECTION_NUM; i++) {
if (fw_mbin[i] != NULL)
release_firmware(fw_mbin[i]);
}
return ISC_SUCCESS;
}
eISCRet_t mms100_ISC_download_mbinary(struct mms_ts_info *info)
{
struct i2c_client *_client = info->client;
eISCRet_t ret_msg = ISC_NONE;
pr_info("[TSP ISC] %s\n", __func__);
mms100_reset(info);
/* ret_msg = mms100_check_operating_mode(_client, EC_BOOT_ON_SUCCEEDED);
if (ret_msg != ISC_SUCCESS)
goto ISC_ERROR_HANDLE;
*/
ret_msg = mms100_open_mbinary(_client);
if (ret_msg != ISC_SUCCESS)
goto ISC_ERROR_HANDLE;
/*Config version Check*/
ret_msg = mms100_compare_version_info(_client);
if (ret_msg != ISC_SUCCESS)
goto ISC_ERROR_HANDLE;
ret_msg = mms100_enter_ISC_mode(_client);
if (ret_msg != ISC_SUCCESS)
goto ISC_ERROR_HANDLE;
ret_msg = mms100_enter_config_update(_client);
if (ret_msg != ISC_SUCCESS)
goto ISC_ERROR_HANDLE;
ret_msg = mms100_ISC_clear_validate_markers(_client);
if (ret_msg != ISC_SUCCESS)
goto ISC_ERROR_HANDLE;
pr_info("[TSP ISC]mms100_update_section_data start");
ret_msg = mms100_update_section_data(_client);
if (ret_msg != ISC_SUCCESS)
goto ISC_ERROR_HANDLE;
pr_info("[TSP ISC]mms100_update_section_data end");
/* mms100_reset(info); */
pr_info("[TSP ISC]FIRMWARE_UPDATE_FINISHED!!!\n");
ret_msg = ISC_SUCCESS;
ISC_ERROR_HANDLE:
if (ret_msg != ISC_SUCCESS)
pr_info("[TSP ISC]ISC_ERROR_CODE: %d\n", ret_msg);
mms100_reset(info);
mms100_close_mbinary();
return ret_msg;
}
#endif /* ISC_DL_MODE end */
static void hw_reboot(struct mms_ts_info *info, bool bootloader)
{
info->pdata->power(false);
gpio_direction_output(info->pdata->gpio_sda, bootloader ? 0 : 1);
gpio_direction_output(info->pdata->gpio_scl, bootloader ? 0 : 1);
gpio_direction_output(info->pdata->gpio_int, 0);
msleep(30);
info->pdata->power(true);
msleep(30);
if (bootloader) {
gpio_set_value(info->pdata->gpio_scl, 0);
gpio_set_value(info->pdata->gpio_sda, 1);
} else {
gpio_set_value(info->pdata->gpio_int, 1);
gpio_direction_input(info->pdata->gpio_int);
gpio_direction_input(info->pdata->gpio_scl);
gpio_direction_input(info->pdata->gpio_sda);
}
msleep(40);
}
static inline void hw_reboot_bootloader(struct mms_ts_info *info)
{
hw_reboot(info, true);
}
static inline void hw_reboot_normal(struct mms_ts_info *info)
{
hw_reboot(info, false);
}
static void isp_toggle_clk(struct mms_ts_info *info, int start_lvl, int end_lvl,
int hold_us)
{
gpio_set_value(info->pdata->gpio_scl, start_lvl);
udelay(hold_us);
gpio_set_value(info->pdata->gpio_scl, end_lvl);
udelay(hold_us);
}
/* 1 <= cnt <= 32 bits to write */
static void isp_send_bits(struct mms_ts_info *info, u32 data, int cnt)
{
gpio_direction_output(info->pdata->gpio_int, 0);
gpio_direction_output(info->pdata->gpio_scl, 0);
gpio_direction_output(info->pdata->gpio_sda, 0);
/* clock out the bits, msb first */
while (cnt--) {
gpio_set_value(info->pdata->gpio_sda, (data >> cnt) & 1);
udelay(3);
isp_toggle_clk(info, 1, 0, 3);
}
}
/* 1 <= cnt <= 32 bits to read */
static u32 isp_recv_bits(struct mms_ts_info *info, int cnt)
{
u32 data = 0;
gpio_direction_output(info->pdata->gpio_int, 0);
gpio_direction_output(info->pdata->gpio_scl, 0);
gpio_set_value(info->pdata->gpio_sda, 0);
gpio_direction_input(info->pdata->gpio_sda);
/* clock in the bits, msb first */
while (cnt--) {
isp_toggle_clk(info, 0, 1, 1);
data = (data << 1) | (!!gpio_get_value(info->pdata->gpio_sda));
}
gpio_direction_output(info->pdata->gpio_sda, 0);
return data;
}
static void isp_enter_mode(struct mms_ts_info *info, u32 mode)
{
int cnt;
unsigned long flags;
local_irq_save(flags);
gpio_direction_output(info->pdata->gpio_int, 0);
gpio_direction_output(info->pdata->gpio_scl, 0);
gpio_direction_output(info->pdata->gpio_sda, 1);
mode &= 0xffff;
for (cnt = 15; cnt >= 0; cnt--) {
gpio_set_value(info->pdata->gpio_int, (mode >> cnt) & 1);
udelay(3);
isp_toggle_clk(info, 1, 0, 3);
}
gpio_set_value(info->pdata->gpio_int, 0);
local_irq_restore(flags);
}
static void isp_exit_mode(struct mms_ts_info *info)
{
int i;
unsigned long flags;
local_irq_save(flags);
gpio_direction_output(info->pdata->gpio_int, 0);
udelay(3);
for (i = 0; i < 10; i++)
isp_toggle_clk(info, 1, 0, 3);
local_irq_restore(flags);
}
static void flash_set_address(struct mms_ts_info *info, u16 addr)
{
/* Only 13 bits of addr are valid.
* The addr is in bits 13:1 of cmd */
isp_send_bits(info, (u32) (addr & 0x1fff) << 1, 18);
}
static void flash_erase(struct mms_ts_info *info)
{
isp_enter_mode(info, ISP_MODE_FLASH_ERASE);
gpio_direction_output(info->pdata->gpio_int, 0);
gpio_direction_output(info->pdata->gpio_scl, 0);
gpio_direction_output(info->pdata->gpio_sda, 1);
/* 4 clock cycles with different timings for the erase to
* get processed, clk is already 0 from above */
udelay(7);
isp_toggle_clk(info, 1, 0, 3);
udelay(7);
isp_toggle_clk(info, 1, 0, 3);
usleep_range(25000, 35000);
isp_toggle_clk(info, 1, 0, 3);
usleep_range(150, 200);
isp_toggle_clk(info, 1, 0, 3);
gpio_set_value(info->pdata->gpio_sda, 0);
isp_exit_mode(info);
}
static u32 flash_readl(struct mms_ts_info *info, u16 addr)
{
int i;
u32 val;
unsigned long flags;
local_irq_save(flags);
isp_enter_mode(info, ISP_MODE_FLASH_READ);
flash_set_address(info, addr);
gpio_direction_output(info->pdata->gpio_scl, 0);
gpio_direction_output(info->pdata->gpio_sda, 0);
udelay(40);
/* data load cycle */
for (i = 0; i < 6; i++)
isp_toggle_clk(info, 1, 0, 10);
val = isp_recv_bits(info, 32);
isp_exit_mode(info);
local_irq_restore(flags);
return val;
}
static void flash_writel(struct mms_ts_info *info, u16 addr, u32 val)
{
unsigned long flags;
local_irq_save(flags);
isp_enter_mode(info, ISP_MODE_FLASH_WRITE);
flash_set_address(info, addr);
isp_send_bits(info, val, 32);
gpio_direction_output(info->pdata->gpio_sda, 1);
/* 6 clock cycles with different timings for the data to get written
* into flash */
isp_toggle_clk(info, 0, 1, 3);
isp_toggle_clk(info, 0, 1, 3);
isp_toggle_clk(info, 0, 1, 6);
isp_toggle_clk(info, 0, 1, 12);
isp_toggle_clk(info, 0, 1, 3);
isp_toggle_clk(info, 0, 1, 3);
isp_toggle_clk(info, 1, 0, 1);
gpio_direction_output(info->pdata->gpio_sda, 0);
isp_exit_mode(info);
local_irq_restore(flags);
usleep_range(300, 400);
}
static bool flash_is_erased(struct mms_ts_info *info)
{
struct i2c_client *client = info->client;
u32 val;
u16 addr;
for (addr = 0; addr < (ISP_MAX_FW_SIZE / 4); addr++) {
udelay(40);
val = flash_readl(info, addr);
if (val != 0xffffffff) {
dev_dbg(&client->dev,
"addr 0x%x not erased: 0x%08x != 0xffffffff\n",
addr, val);
return false;
}
}
return true;
}
static int fw_write_image(struct mms_ts_info *info, const u8 * data, size_t len)
{
struct i2c_client *client = info->client;
u16 addr = 0;
for (addr = 0; addr < (len / 4); addr++, data += 4) {
u32 val = get_unaligned_le32(data);
u32 verify_val;
int retries = 3;
while (retries--) {
flash_writel(info, addr, val);
verify_val = flash_readl(info, addr);
if (val == verify_val)
break;
dev_err(&client->dev,
"mismatch @ addr 0x%x: 0x%x != 0x%x\n",
addr, verify_val, val);
continue;
}
if (retries < 0)
return -ENXIO;
}
return 0;
}
static int fw_download(struct mms_ts_info *info, const u8 * data, size_t len)
{
struct i2c_client *client = info->client;
u32 val;
int ret = 0;
if (len % 4) {
		dev_err(&client->dev,
			"fw image size (%zu) must be a multiple of 4 bytes\n",
			len);
return -EINVAL;
} else if (len > ISP_MAX_FW_SIZE) {
		dev_err(&client->dev,
			"fw image is too big, %zu > %d\n", len, ISP_MAX_FW_SIZE);
return -EINVAL;
}
dev_info(&client->dev, "fw download start\n");
info->pdata->power(false);
gpio_direction_output(info->pdata->gpio_sda, 0);
gpio_direction_output(info->pdata->gpio_scl, 0);
gpio_direction_output(info->pdata->gpio_int, 0);
hw_reboot_bootloader(info);
val = flash_readl(info, ISP_IC_INFO_ADDR);
dev_info(&client->dev, "IC info: 0x%02x (%x)\n", val & 0xff, val);
dev_info(&client->dev, "fw erase...\n");
flash_erase(info);
if (!flash_is_erased(info)) {
ret = -ENXIO;
goto err;
}
dev_info(&client->dev, "fw write...\n");
/* XXX: what does this do?! */
flash_writel(info, ISP_IC_INFO_ADDR, 0xffffff00 | (val & 0xff));
usleep_range(1000, 1500);
ret = fw_write_image(info, data, len);
if (ret)
goto err;
usleep_range(1000, 1500);
hw_reboot_normal(info);
usleep_range(1000, 1500);
dev_info(&client->dev, "fw download done...\n");
return 0;
err:
dev_err(&client->dev, "fw download failed...\n");
hw_reboot_normal(info);
return ret;
}
#if defined(SEC_TSP_ISC_FW_UPDATE)
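/* Bit-serial CRC-16 for the ISC download packets: feeds one byte, MSB first,
 * into the running value 'pre_crc' and returns the updated CRC. */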
static u16 gen_crc(u8 data, u16 pre_crc)
{
u16 crc;
u16 cur;
u16 temp;
u16 bit_1;
u16 bit_2;
int i;
crc = pre_crc;
for (i = 7; i >= 0; i--) {
cur = ((data >> i) & 0x01) ^ (crc & 0x0001);
bit_1 = cur ^ (crc >> 11 & 0x01);
bit_2 = cur ^ (crc >> 4 & 0x01);
temp = (cur << 4) | (crc >> 12 & 0x0F);
temp = (temp << 7) | (bit_1 << 6) | (crc >> 5 & 0x3F);
temp = (temp << 4) | (bit_2 << 3) | (crc >> 1 & 0x0007);
crc = temp;
}
return crc;
}
static int isc_fw_download(struct mms_ts_info *info, const u8 * data,
size_t len)
{
u8 *buff;
u16 crc_buf;
int src_idx;
int dest_idx;
int ret;
int i, j;
buff = kzalloc(ISC_PKT_SIZE, GFP_KERNEL);
if (!buff) {
dev_err(&info->client->dev, "%s: failed to allocate memory\n",
__func__);
ret = -1;
goto err_mem_alloc;
}
	/* entering ISC mode */
*buff = ISC_ENTER_ISC_DATA;
ret = i2c_smbus_write_byte_data(info->client,
ISC_ENTER_ISC_CMD, *buff);
if (ret < 0) {
dev_err(&info->client->dev,
"fail to enter ISC mode(err=%d)\n", ret);
goto fail_to_isc_enter;
}
usleep_range(10000, 20000);
dev_info(&info->client->dev, "Enter ISC mode\n");
/*enter ISC update mode */
*buff = ISC_ENTER_UPDATE_DATA;
ret = i2c_smbus_write_i2c_block_data(info->client,
ISC_CMD,
ISC_ENTER_UPDATE_DATA_LEN, buff);
if (ret < 0) {
dev_err(&info->client->dev,
"fail to enter ISC update mode(err=%d)\n", ret);
goto fail_to_isc_update;
}
dev_info(&info->client->dev, "Enter ISC update mode\n");
/* firmware write */
*buff = ISC_CMD;
*(buff + 1) = ISC_DATA_WRITE_SUB_CMD;
for (i = 0; i < ISC_PKT_NUM; i++) {
*(buff + 2) = i;
crc_buf = gen_crc(*(buff + 2), ISC_DEFAULT_CRC);
for (j = 0; j < ISC_PKT_DATA_SIZE; j++) {
dest_idx = ISC_PKT_HEADER_SIZE + j;
src_idx = i * ISC_PKT_DATA_SIZE +
((int)(j / WORD_SIZE)) * WORD_SIZE -
(j % WORD_SIZE) + 3;
*(buff + dest_idx) = *(data + src_idx);
crc_buf = gen_crc(*(buff + dest_idx), crc_buf);
}
*(buff + ISC_PKT_DATA_SIZE + ISC_PKT_HEADER_SIZE + 1) =
crc_buf & 0xFF;
*(buff + ISC_PKT_DATA_SIZE + ISC_PKT_HEADER_SIZE) =
crc_buf >> 8 & 0xFF;
ret = i2c_master_send(info->client, buff, ISC_PKT_SIZE);
if (ret < 0) {
dev_err(&info->client->dev,
"fail to firmware writing on packet %d.(%d)\n",
i, ret);
goto fail_to_fw_write;
}
usleep_range(1, 5);
/* confirm CRC */
ret = i2c_smbus_read_byte_data(info->client,
ISC_CHECK_STATUS_CMD);
if (ret == ISC_CONFIRM_CRC) {
dev_info(&info->client->dev,
"updating %dth firmware data packet.\n", i);
} else {
dev_err(&info->client->dev,
"fail to firmware update on %dth (%X).\n",
i, ret);
ret = -1;
goto fail_to_confirm_crc;
}
}
ret = 0;
fail_to_confirm_crc:
fail_to_fw_write:
/* exit ISC mode */
*buff = ISC_EXIT_ISC_SUB_CMD;
*(buff + 1) = ISC_EXIT_ISC_SUB_CMD2;
i2c_smbus_write_i2c_block_data(info->client, ISC_CMD, 2, buff);
usleep_range(10000, 20000);
fail_to_isc_update:
hw_reboot_normal(info);
fail_to_isc_enter:
kfree(buff);
err_mem_alloc:
return ret;
}
#endif /* SEC_TSP_ISC_FW_UPDATE */
static int get_fw_version(struct mms_ts_info *info)
{
int ret;
int retries = 3;
/* this seems to fail sometimes after a reset.. retry a few times */
do {
ret = i2c_smbus_read_byte_data(info->client, MMS_FW_VERSION);
} while (ret < 0 && retries-- > 0);
return ret;
}
static int get_hw_version(struct mms_ts_info *info)
{
int ret;
int retries = 3;
/* this seems to fail sometimes after a reset.. retry a few times */
do {
ret = i2c_smbus_read_byte_data(info->client, MMS_HW_REVISION);
} while (ret < 0 && retries-- > 0);
return ret;
}
static int mms_ts_enable(struct mms_ts_info *info, int wakeupcmd)
{
mutex_lock(&info->lock);
if (info->enabled)
goto out;
/* wake up the touch controller. */
if (wakeupcmd == 1) {
i2c_smbus_write_byte_data(info->client, 0, 0);
usleep_range(3000, 5000);
}
info->enabled = true;
enable_irq(info->irq);
out:
mutex_unlock(&info->lock);
return 0;
}
static int mms_ts_disable(struct mms_ts_info *info, int sleepcmd)
{
mutex_lock(&info->lock);
if (!info->enabled)
goto out;
disable_irq_nosync(info->irq);
if (sleepcmd == 1) {
i2c_smbus_write_byte_data(info->client, MMS_MODE_CONTROL, 0);
usleep_range(10000, 12000);
}
info->enabled = false;
touch_is_pressed = 0;
out:
mutex_unlock(&info->lock);
return 0;
}
static int mms_ts_finish_config(struct mms_ts_info *info)
{
struct i2c_client *client = info->client;
int ret;
ret = request_threaded_irq(client->irq, NULL, mms_ts_interrupt,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
MELFAS_TS_NAME, info);
if (ret < 0) {
ret = 1;
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_req_irq;
}
info->irq = client->irq;
barrier();
dev_info(&client->dev,
"Melfas MMS-series touch controller initialized\n");
return 0;
err_req_irq:
return ret;
}
static int mms_ts_fw_info(struct mms_ts_info *info)
{
struct i2c_client *client = info->client;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret = 0;
int ver, hw_rev;
ver = get_fw_version(info);
info->fw_ic_ver = ver;
dev_info(&client->dev,
"[TSP]fw version 0x%02x !!!!\n", ver);
hw_rev = get_hw_version(info);
dev_info(&client->dev,
"[TSP] hw rev = %x\n", hw_rev);
if (ver < 0 || hw_rev < 0) {
ret = 1;
dev_err(&client->dev,
"i2c fail...tsp driver unload.\n");
return ret;
}
if (!info->pdata || !info->pdata->mux_fw_flash) {
ret = 1;
dev_err(&client->dev,
"fw cannot be updated, missing platform data\n");
return ret;
}
ret = mms_ts_finish_config(info);
return ret;
}
static int mms_ts_fw_load(struct mms_ts_info *info)
{
struct i2c_client *client = info->client;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret = 0;
int ver, hw_rev;
int retries = 3;
ver = get_fw_version(info);
info->fw_ic_ver = ver;
dev_info(&client->dev,
"[TSP]fw version 0x%02x !!!!\n", ver);
hw_rev = get_hw_version(info);
dev_info(&client->dev,
"[TSP]hw rev = 0x%02x\n", hw_rev);
pr_err("[TSP] ISC Ver [0x%02x] [0x%02x] [0x%02x]",
i2c_smbus_read_byte_data(info->client, 0xF3),
i2c_smbus_read_byte_data(info->client, 0xF4),
i2c_smbus_read_byte_data(info->client, 0xF5));
if (!info->pdata || !info->pdata->mux_fw_flash) {
ret = 1;
dev_err(&client->dev,
"fw cannot be updated, missing platform data\n");
goto out;
}
/* 4.8" OCTA LCD FW */
if (ver >= FW_VERSION_4_8 && ver != 0xFF\
&& ver != 0x00 && ver != 0x45) {
		dev_info(&client->dev,
			"4.8 fw version does not need an update\n");
goto done;
}
while (retries--) {
ret = mms100_ISC_download_mbinary(info);
ver = get_fw_version(info);
info->fw_ic_ver = ver;
if (ret == 0) {
pr_err("[TSP] mms100_ISC_download_mbinary success");
goto done;
} else {
pr_err("[TSP] mms100_ISC_download_mbinary fail [%d]",
ret);
ret = 1;
}
dev_err(&client->dev, "retrying flashing\n");
}
out:
return ret;
done:
#if ISC_DL_MODE /* ISC_DL_MODE start */
pr_err("[TSP] ISC Ver [0x%02x] [0x%02x] [0x%02x]",
i2c_smbus_read_byte_data(info->client, 0xF3),
i2c_smbus_read_byte_data(info->client, 0xF4),
i2c_smbus_read_byte_data(info->client, 0xF5));
#endif
ret = mms_ts_finish_config(info);
return ret;
}
#ifdef SEC_TSP_FACTORY_TEST
static void set_cmd_result(struct mms_ts_info *info, char *buff, int len)
{
strncat(info->cmd_result, buff, len);
}
static void get_raw_data_all(struct mms_ts_info *info, u8 cmd)
{
u8 w_buf[6];
u8 read_buffer[2]; /* 52 */
int gpio;
int ret;
int i, j;
u32 max_value = 0, min_value = 0;
u32 raw_data;
char buff[TSP_CMD_STR_LEN] = {0};
gpio = info->pdata->gpio_int;
/* gpio = msm_irq_to_gpio(info->irq); */
disable_irq(info->irq);
w_buf[0] = MMS_VSC_CMD; /* vendor specific command id */
w_buf[1] = MMS_VSC_MODE; /* mode of vendor */
w_buf[2] = 0; /* tx line */
w_buf[3] = 0; /* rx line */
w_buf[4] = 0; /* reserved */
w_buf[5] = 0; /* sub command */
if (cmd == MMS_VSC_CMD_EXIT) {
w_buf[5] = MMS_VSC_CMD_EXIT; /* exit test mode */
ret = i2c_smbus_write_i2c_block_data(info->client,
w_buf[0], 5, &w_buf[1]);
if (ret < 0)
goto err_i2c;
enable_irq(info->irq);
msleep(200);
return;
}
	/* MMS_VSC_CMD_CM_DELTA and MMS_VSC_CMD_CM_ABS:
	 * these two modes require entering the test mode first,
	 * and the test must be followed by an exit command.
	 */
if (cmd == MMS_VSC_CMD_CM_DELTA || cmd == MMS_VSC_CMD_CM_ABS) {
/* enter the debug mode */
w_buf[2] = 0x0; /* tx */
w_buf[3] = 0x0; /* rx */
w_buf[5] = MMS_VSC_CMD_ENTER;
ret = i2c_smbus_write_i2c_block_data(info->client,
w_buf[0], 5, &w_buf[1]);
if (ret < 0)
goto err_i2c;
		/* waiting for the interrupt */
while (gpio_get_value(gpio))
udelay(100);
}
for (i = 0; i < RX_NUM; i++) {
for (j = 0; j < TX_NUM; j++) {
w_buf[2] = j; /* tx */
w_buf[3] = i; /* rx */
w_buf[5] = cmd;
ret = i2c_smbus_write_i2c_block_data(info->client,
w_buf[0], 5, &w_buf[1]);
if (ret < 0)
goto err_i2c;
usleep_range(1, 5);
ret = i2c_smbus_read_i2c_block_data(info->client, 0xBF,
2, read_buffer);
if (ret < 0)
goto err_i2c;
raw_data = ((u16) read_buffer[1] << 8) | read_buffer[0];
if (i == 0 && j == 0) {
max_value = min_value = raw_data;
} else {
max_value = max(max_value, raw_data);
min_value = min(min_value, raw_data);
}
if (cmd == MMS_VSC_CMD_INTENSITY) {
info->intensity[i * TX_NUM + j] = raw_data;
dev_dbg(&info->client->dev, "[TSP] intensity[%d][%d] = %d\n",
j, i, info->intensity[i * TX_NUM + j]);
} else if (cmd == MMS_VSC_CMD_CM_DELTA) {
info->inspection[i * TX_NUM + j] = raw_data;
dev_dbg(&info->client->dev, "[TSP] delta[%d][%d] = %d\n",
j, i, info->inspection[i * TX_NUM + j]);
} else if (cmd == MMS_VSC_CMD_CM_ABS) {
info->raw[i * TX_NUM + j] = raw_data;
dev_dbg(&info->client->dev, "[TSP] raw[%d][%d] = %d\n",
j, i, info->raw[i * TX_NUM + j]);
} else if (cmd == MMS_VSC_CMD_REFER) {
info->reference[i * TX_NUM + j] =
raw_data >> 3;
dev_dbg(&info->client->dev, "[TSP] reference[%d][%d] = %d\n",
j, i, info->reference[i * TX_NUM + j]);
}
}
}
snprintf(buff, sizeof(buff), "%d,%d", min_value, max_value);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
	enable_irq(info->irq);
	return;		/* success: don't fall through into the error path */
err_i2c:
dev_err(&info->client->dev, "%s: fail to i2c (cmd=%d)\n",
__func__, cmd);
}
static u32 get_raw_data_one(struct mms_ts_info *info, u16 rx_idx, u16 tx_idx,
u8 cmd)
{
u8 w_buf[6];
u8 read_buffer[2];
int ret;
u32 raw_data;
w_buf[0] = MMS_VSC_CMD; /* vendor specific command id */
w_buf[1] = MMS_VSC_MODE; /* mode of vendor */
w_buf[2] = 0; /* tx line */
w_buf[3] = 0; /* rx line */
w_buf[4] = 0; /* reserved */
w_buf[5] = 0; /* sub command */
if (cmd != MMS_VSC_CMD_INTENSITY && cmd != MMS_VSC_CMD_RAW &&
cmd != MMS_VSC_CMD_REFER) {
dev_err(&info->client->dev, "%s: not profer command(cmd=%d)\n",
__func__, cmd);
return FAIL;
}
w_buf[2] = tx_idx; /* tx */
w_buf[3] = rx_idx; /* rx */
w_buf[5] = cmd; /* sub command */
ret = i2c_smbus_write_i2c_block_data(info->client, w_buf[0], 5,
&w_buf[1]);
if (ret < 0)
goto err_i2c;
ret = i2c_smbus_read_i2c_block_data(info->client, 0xBF, 2, read_buffer);
if (ret < 0)
goto err_i2c;
raw_data = ((u16) read_buffer[1] << 8) | read_buffer[0];
if (cmd == MMS_VSC_CMD_REFER)
raw_data = raw_data >> 4;
return raw_data;
err_i2c:
dev_err(&info->client->dev, "%s: fail to i2c (cmd=%d)\n",
__func__, cmd);
return FAIL;
}
static ssize_t show_close_tsp_test(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mms_ts_info *info = dev_get_drvdata(dev);
get_raw_data_all(info, MMS_VSC_CMD_EXIT);
info->ft_flag = 0;
return snprintf(buf, TSP_BUF_SIZE, "%u\n", 0);
}
static void set_default_result(struct mms_ts_info *info)
{
char delim = ':';
memset(info->cmd_result, 0x00, ARRAY_SIZE(info->cmd_result));
memcpy(info->cmd_result, info->cmd, strlen(info->cmd));
strncat(info->cmd_result, &delim, 1);
}
static int check_rx_tx_num(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[TSP_CMD_STR_LEN] = {0};
int node;
if (info->cmd_param[0] < 0 ||
info->cmd_param[0] >= TX_NUM ||
info->cmd_param[1] < 0 ||
info->cmd_param[1] >= RX_NUM) {
snprintf(buff, sizeof(buff) , "%s", "NG");
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 3;
dev_info(&info->client->dev, "%s: parameter error: %u,%u\n",
__func__, info->cmd_param[0],
info->cmd_param[1]);
node = -1;
return node;
}
node = info->cmd_param[1] * TX_NUM + info->cmd_param[0];
dev_info(&info->client->dev, "%s: node = %d\n", __func__,
node);
return node;
}
static void not_support_cmd(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
set_default_result(info);
sprintf(buff, "%s", "NA");
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 4;
dev_info(&info->client->dev, "%s: \"%s(%d)\"\n", __func__,
buff, strnlen(buff, sizeof(buff)));
return;
}
static void fw_update(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
struct i2c_client *client = info->client;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret = 0;
int ver = 0, fw_bin_ver = 0;
int retries = 5;
const u8 *buff = 0;
mm_segment_t old_fs = {0};
struct file *fp = NULL;
long fsize = 0, nread = 0;
char fw_path[MAX_FW_PATH+1];
char result[16] = {0};
set_default_result(info);
dev_info(&client->dev,
"fw_ic_ver = 0x%02x, fw_bin_ver = 0x%02x\n",
info->fw_ic_ver, fw_bin_ver);
switch (info->cmd_param[0]) {
case BUILT_IN:
dev_info(&client->dev, "built in 4.8 fw is loaded!!\n");
while (retries--) {
ret = mms100_ISC_download_mbinary(info);
ver = get_fw_version(info);
info->fw_ic_ver = ver;
if (ret == 0) {
pr_err("[TSP] mms100_ISC_download_mbinary success");
info->cmd_state = 2;
return;
} else {
pr_err("[TSP] mms100_ISC_download_mbinary fail[%d]",
ret);
info->cmd_state = 3;
}
}
return;
break;
case UMS:
old_fs = get_fs();
set_fs(get_ds());
snprintf(fw_path, MAX_FW_PATH, "/sdcard/%s", TSP_FW_FILENAME);
fp = filp_open(fw_path, O_RDONLY, 0);
if (IS_ERR(fp)) {
			dev_err(&client->dev,
				"file %s open error: %ld\n", fw_path, PTR_ERR(fp));
info->cmd_state = 3;
goto err_open;
}
fsize = fp->f_path.dentry->d_inode->i_size;
buff = kzalloc((size_t)fsize, GFP_KERNEL);
if (!buff) {
dev_err(&client->dev, "fail to alloc buffer for fw\n");
info->cmd_state = 3;
goto err_alloc;
}
nread = vfs_read(fp, (char __user *)buff, fsize, &fp->f_pos);
if (nread != fsize) {
/*dev_err("fail to read file %s (nread = %d)\n",
fw_path, nread);*/
info->cmd_state = 3;
goto err_fw_size;
}
filp_close(fp, current->files);
set_fs(old_fs);
dev_info(&client->dev, "ums fw is loaded!!\n");
break;
default:
dev_err(&client->dev, "invalid fw file type!!\n");
goto not_support;
}
disable_irq(info->irq);
while (retries--) {
i2c_lock_adapter(adapter);
info->pdata->mux_fw_flash(true);
ret = fw_download(info, (const u8 *)buff,
(const size_t)fsize);
info->pdata->mux_fw_flash(false);
i2c_unlock_adapter(adapter);
if (ret < 0) {
dev_err(&client->dev, "retrying flashing\n");
continue;
}
ver = get_fw_version(info);
info->fw_ic_ver = ver;
if (info->cmd_param[0] == 1 || info->cmd_param[0] == 2) {
dev_info(&client->dev,
"fw update done. ver = 0x%02x\n", ver);
info->cmd_state = 2;
snprintf(result, sizeof(result) , "%s", "OK");
set_cmd_result(info, result,
strnlen(result, sizeof(result)));
enable_irq(info->irq);
kfree(buff);
return;
} else if (ver == fw_bin_ver) {
dev_info(&client->dev,
"fw update done. ver = 0x%02x\n", ver);
info->cmd_state = 2;
snprintf(result, sizeof(result) , "%s", "OK");
set_cmd_result(info, result,
strnlen(result, sizeof(result)));
enable_irq(info->irq);
return;
} else {
dev_err(&client->dev,
"ERROR : fw version is still wrong (0x%x != 0x%x)\n",
ver, fw_bin_ver);
}
dev_err(&client->dev, "retrying flashing\n");
}
if (fp != NULL) {
err_fw_size:
kfree(buff);
err_alloc:
filp_close(fp, NULL);
err_open:
set_fs(old_fs);
}
not_support:
snprintf(result, sizeof(result) , "%s", "NG");
set_cmd_result(info, result, strnlen(result, sizeof(result)));
return;
}
static void get_fw_ver_bin(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
int hw_rev;
set_default_result(info);
snprintf(buff, sizeof(buff), "%#02x", FW_VERSION_4_8);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__,
buff, strnlen(buff, sizeof(buff)));
}
static void get_fw_ver_ic(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
int ver;
set_default_result(info);
ver = info->fw_ic_ver;
snprintf(buff, sizeof(buff), "%#02x", ver);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__,
buff, strnlen(buff, sizeof(buff)));
}
static void get_config_ver(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[20] = {0};
set_default_result(info);
snprintf(buff, sizeof(buff), "%s", info->config_fw_version);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__,
buff, strnlen(buff, sizeof(buff)));
}
static void get_threshold(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
int threshold;
set_default_result(info);
threshold = i2c_smbus_read_byte_data(info->client, 0x05);
if (threshold < 0) {
snprintf(buff, sizeof(buff), "%s", "NG");
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 3;
return;
}
snprintf(buff, sizeof(buff), "%d", threshold);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__,
buff, strnlen(buff, sizeof(buff)));
}
/*
static void module_off_master(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[3] = {0};
mutex_lock(&info->lock);
if (info->enabled) {
disable_irq(info->irq);
info->enabled = false;
touch_is_pressed = 0;
}
mutex_unlock(&info->lock);
info->pdata->power(0);
if (info->pdata->is_vdd_on() == 0)
snprintf(buff, sizeof(buff), "%s", "OK");
else
snprintf(buff, sizeof(buff), "%s", "NG");
set_default_result(info);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
if (strncmp(buff, "OK", 2) == 0)
info->cmd_state = 2;
else
info->cmd_state = 3;
dev_info(&info->client->dev, "%s: %s\n", __func__, buff);
}
static void module_on_master(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[3] = {0};
mms_pwr_on_reset(info);
mutex_lock(&info->lock);
if (!info->enabled) {
enable_irq(info->irq);
info->enabled = true;
}
mutex_unlock(&info->lock);
if (info->pdata->is_vdd_on() == 1)
snprintf(buff, sizeof(buff), "%s", "OK");
else
snprintf(buff, sizeof(buff), "%s", "NG");
set_default_result(info);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
if (strncmp(buff, "OK", 2) == 0)
info->cmd_state = 2;
else
info->cmd_state = 3;
dev_info(&info->client->dev, "%s: %s\n", __func__, buff);
}
static void module_off_slave(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
not_support_cmd(info);
}
static void module_on_slave(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
not_support_cmd(info);
}
*/
static void get_chip_vendor(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
set_default_result(info);
snprintf(buff, sizeof(buff), "%s", "MELFAS");
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__,
buff, strnlen(buff, sizeof(buff)));
}
static void get_chip_name(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
set_default_result(info);
snprintf(buff, sizeof(buff), "%s", "MMS144");
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__,
buff, strnlen(buff, sizeof(buff)));
}
static void get_reference(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
unsigned int val;
int node;
set_default_result(info);
node = check_rx_tx_num(info);
if (node < 0)
return;
val = info->reference[node];
snprintf(buff, sizeof(buff), "%u", val);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__,
buff, strnlen(buff, sizeof(buff)));
}
static void get_cm_abs(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
unsigned int val;
int node;
set_default_result(info);
node = check_rx_tx_num(info);
if (node < 0)
return;
val = info->raw[node];
snprintf(buff, sizeof(buff), "%u", val);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff,
strnlen(buff, sizeof(buff)));
}
static void get_cm_delta(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
unsigned int val;
int node;
set_default_result(info);
node = check_rx_tx_num(info);
if (node < 0)
return;
val = info->inspection[node];
snprintf(buff, sizeof(buff), "%u", val);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff,
strnlen(buff, sizeof(buff)));
}
static void get_intensity(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
unsigned int val;
int node;
set_default_result(info);
node = check_rx_tx_num(info);
if (node < 0)
return;
val = info->intensity[node];
snprintf(buff, sizeof(buff), "%u", val);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff,
strnlen(buff, sizeof(buff)));
}
static void get_x_num(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
int val;
set_default_result(info);
val = i2c_smbus_read_byte_data(info->client, 0xEF);
if (val < 0) {
snprintf(buff, sizeof(buff), "%s", "NG");
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 3;
dev_info(&info->client->dev,
"%s: fail to read num of x (%d).\n", __func__, val);
return;
}
snprintf(buff, sizeof(buff), "%u", val);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff,
strnlen(buff, sizeof(buff)));
}
static void get_y_num(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
char buff[16] = {0};
int val;
set_default_result(info);
val = i2c_smbus_read_byte_data(info->client, 0xEE);
if (val < 0) {
snprintf(buff, sizeof(buff), "%s", "NG");
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 3;
dev_info(&info->client->dev,
"%s: fail to read num of y (%d).\n", __func__, val);
return;
}
snprintf(buff, sizeof(buff), "%u", val);
set_cmd_result(info, buff, strnlen(buff, sizeof(buff)));
info->cmd_state = 2;
dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff,
strnlen(buff, sizeof(buff)));
}
static void run_reference_read(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
set_default_result(info);
get_raw_data_all(info, MMS_VSC_CMD_REFER);
info->cmd_state = 2;
/* dev_info(&info->client->dev, "%s: %s(%d)\n", __func__); */
}
static void run_cm_abs_read(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
set_default_result(info);
get_raw_data_all(info, MMS_VSC_CMD_CM_ABS);
get_raw_data_all(info, MMS_VSC_CMD_EXIT);
info->cmd_state = 2;
/* dev_info(&info->client->dev, "%s: %s(%d)\n", __func__); */
}
static void run_cm_delta_read(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
set_default_result(info);
get_raw_data_all(info, MMS_VSC_CMD_CM_DELTA);
get_raw_data_all(info, MMS_VSC_CMD_EXIT);
info->cmd_state = 2;
/* dev_info(&info->client->dev, "%s: %s(%d)\n", __func__); */
}
static void run_intensity_read(void *device_data)
{
struct mms_ts_info *info = (struct mms_ts_info *)device_data;
set_default_result(info);
get_raw_data_all(info, MMS_VSC_CMD_INTENSITY);
info->cmd_state = 2;
/* dev_info(&info->client->dev, "%s: %s(%d)\n", __func__); */
}
static ssize_t store_cmd(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct mms_ts_info *info = dev_get_drvdata(dev);
struct i2c_client *client = info->client;
char *cur, *start, *end;
char buff[TSP_CMD_STR_LEN] = {0};
int len, i;
struct tsp_cmd *tsp_cmd_ptr = NULL;
char delim = ',';
bool cmd_found = false;
int param_cnt = 0;
int ret;
if (info->cmd_is_running == true) {
dev_err(&info->client->dev, "tsp_cmd: other cmd is running.\n");
goto err_out;
}
/* check lock */
mutex_lock(&info->cmd_lock);
info->cmd_is_running = true;
mutex_unlock(&info->cmd_lock);
info->cmd_state = 1;
for (i = 0; i < ARRAY_SIZE(info->cmd_param); i++)
info->cmd_param[i] = 0;
len = (int)count;
if (*(buf + len - 1) == '\n')
len--;
memset(info->cmd, 0x00, ARRAY_SIZE(info->cmd));
memcpy(info->cmd, buf, len);
cur = strchr(buf, (int)delim);
if (cur)
memcpy(buff, buf, cur - buf);
else
memcpy(buff, buf, len);
/* find command */
list_for_each_entry(tsp_cmd_ptr, &info->cmd_list_head, list) {
if (!strcmp(buff, tsp_cmd_ptr->cmd_name)) {
cmd_found = true;
break;
}
}
/* set not_support_cmd */
if (!cmd_found) {
list_for_each_entry(tsp_cmd_ptr, &info->cmd_list_head, list) {
if (!strcmp("not_support_cmd", tsp_cmd_ptr->cmd_name))
break;
}
}
/* parsing parameters */
if (cur && cmd_found) {
cur++;
start = cur;
memset(buff, 0x00, ARRAY_SIZE(buff));
do {
if (*cur == delim || cur - buf == len) {
end = cur;
memcpy(buff, start, end - start);
*(buff + strlen(buff)) = '\0';
ret = kstrtoint(buff, 10,\
info->cmd_param + param_cnt);
start = cur + 1;
memset(buff, 0x00, ARRAY_SIZE(buff));
param_cnt++;
}
cur++;
} while (cur - buf <= len);
}
dev_info(&client->dev, "cmd = %s\n", tsp_cmd_ptr->cmd_name);
for (i = 0; i < param_cnt; i++)
dev_info(&client->dev, "cmd param %d= %d\n", i,
info->cmd_param[i]);
tsp_cmd_ptr->cmd_func(info);
err_out:
return count;
}
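/*
 * Illustration (not part of the original driver comments): store_cmd()
 * above expects a string of the form "<cmd_name>[,<param>,<param>,...]".
 * Writing "get_cm_delta,1,2" to the cmd node, for example, is parsed into
 * info->cmd = "get_cm_delta" with cmd_param[0] = 1 and cmd_param[1] = 2
 * before get_cm_delta() is invoked; how the parameters are interpreted is
 * up to the individual handler (here via check_rx_tx_num(), defined
 * earlier in this file).
 */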
static ssize_t show_cmd_status(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct mms_ts_info *info = dev_get_drvdata(dev);
char buff[16] = {0};
dev_info(&info->client->dev, "tsp cmd: status:%d\n",
info->cmd_state);
if (info->cmd_state == 0)
snprintf(buff, sizeof(buff), "WAITING");
else if (info->cmd_state == 1)
snprintf(buff, sizeof(buff), "RUNNING");
else if (info->cmd_state == 2)
snprintf(buff, sizeof(buff), "OK");
else if (info->cmd_state == 3)
snprintf(buff, sizeof(buff), "FAIL");
else if (info->cmd_state == 4)
snprintf(buff, sizeof(buff), "NOT_APPLICABLE");
return snprintf(buf, TSP_BUF_SIZE, "%s\n", buff);
}
static ssize_t show_cmd_result(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct mms_ts_info *info = dev_get_drvdata(dev);
dev_info(&info->client->dev, "tsp cmd: result: %s\n", info->cmd_result);
mutex_lock(&info->cmd_lock);
info->cmd_is_running = false;
mutex_unlock(&info->cmd_lock);
info->cmd_state = 0;
return snprintf(buf, TSP_BUF_SIZE, "%s\n", info->cmd_result);
}
#ifdef ESD_DEBUG
static bool intensity_log_flag;
static ssize_t show_intensity_logging_on(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct mms_ts_info *info = dev_get_drvdata(dev);
struct i2c_client *client = info->client;
struct file *fp;
char log_data[160] = { 0, };
char buff[16] = { 0, };
mm_segment_t old_fs;
long nwrite;
u32 val;
int i, y, c;
old_fs = get_fs();
set_fs(KERNEL_DS);
#define MELFAS_DEBUG_LOG_PATH "/sdcard/melfas_log"
dev_info(&client->dev, "%s: start.\n", __func__);
fp = filp_open(MELFAS_DEBUG_LOG_PATH, O_RDWR | O_CREAT,
S_IRWXU | S_IRWXG | S_IRWXO);
if (IS_ERR(fp)) {
dev_err(&client->dev, "%s: fail to open log file\n", __func__);
goto open_err;
}
intensity_log_flag = 1;
do {
for (y = 0; y < 3; y++) {
			/* for tx channel 0~2 */
memset(log_data, 0x00, 160);
snprintf(buff, 16, "%1u: ", y);
strncat(log_data, buff, strnlen(buff, 16));
for (i = 0; i < RX_NUM; i++) {
val = get_raw_data_one(info, i, y,
MMS_VSC_CMD_INTENSITY);
snprintf(buff, 16, "%5u, ", val);
strncat(log_data, buff, strnlen(buff, 16));
}
memset(buff, '\n', 2);
c = (y == 2) ? 2 : 1;
strncat(log_data, buff, c);
nwrite = vfs_write(fp, (const char __user *)log_data,
strnlen(log_data, 160), &fp->f_pos);
}
		usleep_range(5000, 6000);
} while (intensity_log_flag);
filp_close(fp, current->files);
set_fs(old_fs);
return 0;
open_err:
set_fs(old_fs);
return FAIL;
}
static ssize_t show_intensity_logging_off(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct mms_ts_info *info = dev_get_drvdata(dev);
intensity_log_flag = 0;
	usleep_range(10000, 11000);
get_raw_data_all(info, MMS_VSC_CMD_EXIT);
return 0;
}
#endif
static DEVICE_ATTR(close_tsp_test, S_IRUGO, show_close_tsp_test, NULL);
static DEVICE_ATTR(cmd, S_IWUSR | S_IWGRP, NULL, store_cmd);
static DEVICE_ATTR(cmd_status, S_IRUGO, show_cmd_status, NULL);
static DEVICE_ATTR(cmd_result, S_IRUGO, show_cmd_result, NULL);
#ifdef ESD_DEBUG
static DEVICE_ATTR(intensity_logging_on, S_IRUGO, show_intensity_logging_on,
NULL);
static DEVICE_ATTR(intensity_logging_off, S_IRUGO, show_intensity_logging_off,
NULL);
#endif
static struct attribute *sec_touch_factory_attributes[] = {
&dev_attr_close_tsp_test.attr,
&dev_attr_cmd.attr,
&dev_attr_cmd_status.attr,
&dev_attr_cmd_result.attr,
#ifdef ESD_DEBUG
&dev_attr_intensity_logging_on.attr,
&dev_attr_intensity_logging_off.attr,
#endif
NULL,
};
static struct attribute_group sec_touch_factory_attr_group = {
	.attrs = sec_touch_factory_attributes,
};
#endif /* SEC_TSP_FACTORY_TEST */
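/*
 * Rough usage sketch (assumes the usual sec_class sysfs layout, which is
 * platform dependent): with the "tsp" device created in mms_ts_probe()
 * below, a test script would typically do something like
 *
 *	echo "get_fw_ver_ic" > /sys/class/sec/tsp/cmd
 *	cat /sys/class/sec/tsp/cmd_status	(-> "OK" on success)
 *	cat /sys/class/sec/tsp/cmd_result	(-> the stored result string)
 *
 * The exact path and result formatting are illustrative only; they depend
 * on sec_class and on set_default_result()/set_cmd_result(), which are
 * defined earlier in this file.
 */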
static int __devinit mms_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct mms_ts_info *info;
struct input_dev *input_dev;
int ret = 0;
char buf[4] = { 0, };
#ifdef SEC_TSP_FACTORY_TEST
int i;
struct device *fac_dev_ts;
#endif
touch_is_pressed = 0;
#if 0
gpio_request(GPIO_OLED_DET, "OLED_DET");
ret = gpio_get_value(GPIO_OLED_DET);
printk(KERN_DEBUG
"[TSP] OLED_DET = %d\n", ret);
if (ret == 0) {
printk(KERN_DEBUG
"[TSP] device wasn't connected to board\n");
return -EIO;
}
#endif
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -EIO;
info = kzalloc(sizeof(struct mms_ts_info), GFP_KERNEL);
if (!info) {
dev_err(&client->dev, "Failed to allocate memory\n");
ret = -ENOMEM;
goto err_alloc;
}
input_dev = input_allocate_device();
if (!input_dev) {
dev_err(&client->dev, "Failed to allocate memory for input device\n");
ret = -ENOMEM;
goto err_input_alloc;
}
info->client = client;
info->input_dev = input_dev;
info->pdata = client->dev.platform_data;
	if (NULL == info->pdata) {
		pr_err("failed to get platform data\n");
		ret = -EINVAL;
		goto err_reg_input_dev;
	}
info->irq = -1;
mutex_init(&info->lock);
if (info->pdata) {
info->max_x = info->pdata->max_x;
info->max_y = info->pdata->max_y;
info->invert_x = info->pdata->invert_x;
info->invert_y = info->pdata->invert_y;
info->config_fw_version = info->pdata->config_fw_version;
info->register_cb = info->pdata->register_cb;
} else {
info->max_x = 720;
info->max_y = 1280;
}
snprintf(info->phys, sizeof(info->phys),
"%s/input0", dev_name(&client->dev));
input_dev->name = "sec_touchscreen"; /*= "Melfas MMSxxx Touchscreen";*/
input_dev->phys = info->phys;
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
__set_bit(EV_ABS, input_dev->evbit);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
input_mt_init_slots(input_dev, MAX_FINGERS);
input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR,
0, MAX_WIDTH, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, (info->max_x)-1, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
0, (info->max_y)-1, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
0, MAX_PRESSURE, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
0, MAX_PRESSURE, 0, 0);
input_set_abs_params(input_dev, ABS_MT_ANGLE,
MIN_ANGLE, MAX_ANGLE, 0, 0);
input_set_abs_params(input_dev, ABS_MT_PALM,
0, 1, 0, 0);
input_set_drvdata(input_dev, info);
ret = input_register_device(input_dev);
if (ret) {
dev_err(&client->dev, "failed to register input dev (%d)\n",
ret);
goto err_reg_input_dev;
}
#if TOUCH_BOOSTER
mutex_init(&info->dvfs_lock);
INIT_DELAYED_WORK(&info->work_dvfs_off, set_dvfs_off);
INIT_DELAYED_WORK(&info->work_dvfs_chg, change_dvfs_lock);
bus_dev = dev_get("exynos-busfreq");
info->cpufreq_level = -1;
info->dvfs_lock_status = false;
#endif
i2c_set_clientdata(client, info);
info->pdata->power(true);
msleep(100);
ret = i2c_master_recv(client, buf, 1);
if (ret < 0) { /* tsp connect check */
pr_err("%s: i2c fail...tsp driver unload [%d], Add[%d]\n",
__func__, ret, info->client->addr);
goto err_config;
}
ret = mms_ts_fw_load(info);
/* ret = mms_ts_fw_info(info); */
if (ret) {
dev_err(&client->dev, "failed to initialize (%d)\n", ret);
goto err_config;
}
info->enabled = true;
info->callbacks.inform_charger = melfas_ta_cb;
if (info->register_cb)
info->register_cb(&info->callbacks);
#ifdef CONFIG_HAS_EARLYSUSPEND
info->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
info->early_suspend.suspend = mms_ts_early_suspend;
info->early_suspend.resume = mms_ts_late_resume;
register_early_suspend(&info->early_suspend);
#endif
sec_touchscreen = device_create(sec_class,
NULL, 0, info, "sec_touchscreen");
if (IS_ERR(sec_touchscreen)) {
dev_err(&client->dev,
"Failed to create device for the sysfs1\n");
ret = -ENODEV;
}
#ifdef SEC_TSP_FACTORY_TEST
INIT_LIST_HEAD(&info->cmd_list_head);
for (i = 0; i < ARRAY_SIZE(tsp_cmds); i++)
list_add_tail(&tsp_cmds[i].list, &info->cmd_list_head);
mutex_init(&info->cmd_lock);
info->cmd_is_running = false;
fac_dev_ts = device_create(sec_class,
NULL, 0, info, "tsp");
if (IS_ERR(fac_dev_ts))
dev_err(&client->dev, "Failed to create device for the sysfs\n");
ret = sysfs_create_group(&fac_dev_ts->kobj,
&sec_touch_factory_attr_group);
if (ret)
dev_err(&client->dev, "Failed to create sysfs group\n");
#endif
return 0;
err_config:
	input_unregister_device(input_dev);
	/* input_unregister_device() drops the final reference, so make sure
	 * input_free_device() is not called on the device again below.
	 */
	input_dev = NULL;
err_reg_input_dev:
	input_free_device(input_dev);
err_input_alloc:
	kfree(info);
err_alloc:
return ret;
}
static int __devexit mms_ts_remove(struct i2c_client *client)
{
struct mms_ts_info *info = i2c_get_clientdata(client);
unregister_early_suspend(&info->early_suspend);
if (info->irq >= 0)
free_irq(info->irq, info);
input_unregister_device(info->input_dev);
kfree(info);
return 0;
}
#if defined(CONFIG_PM) || defined(CONFIG_HAS_EARLYSUSPEND)
static int mms_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mms_ts_info *info = i2c_get_clientdata(client);
if (!info->enabled)
return 0;
dev_notice(&info->client->dev, "%s: users=%d\n", __func__,
info->input_dev->users);
disable_irq(info->irq);
info->enabled = false;
touch_is_pressed = 0;
release_all_fingers(info);
info->pdata->power(false);
	/* This delay is needed to prevent an unstable POR caused by
	 * rapidly and repeatedly pressing the PWR key.
	 */
msleep(50);
return 0;
}
static int mms_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mms_ts_info *info = i2c_get_clientdata(client);
int ret = 0;
if (info->enabled)
return 0;
dev_notice(&info->client->dev, "%s: users=%d\n", __func__,
info->input_dev->users);
info->pdata->power(true);
msleep(120);
if (info->ta_status) {
dev_notice(&client->dev, "TA connect!!!\n");
i2c_smbus_write_byte_data(info->client, 0x33, 0x1);
} else {
dev_notice(&client->dev, "TA disconnect!!!\n");
i2c_smbus_write_byte_data(info->client, 0x33, 0x2);
}
/* Because irq_type by EXT_INTxCON register is changed to low_level
* after wakeup, irq_type set to falling edge interrupt again.
*/
enable_irq(info->irq);
info->enabled = true;
mms_set_noise_mode(info);
return 0;
}
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
static void mms_ts_early_suspend(struct early_suspend *h)
{
struct mms_ts_info *info;
info = container_of(h, struct mms_ts_info, early_suspend);
mms_ts_suspend(&info->client->dev);
}
static void mms_ts_late_resume(struct early_suspend *h)
{
struct mms_ts_info *info;
info = container_of(h, struct mms_ts_info, early_suspend);
mms_ts_resume(&info->client->dev);
}
#endif
#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static const struct dev_pm_ops mms_ts_pm_ops = {
.suspend = mms_ts_suspend,
.resume = mms_ts_resume,
#ifdef CONFIG_HIBERNATION
.freeze = mms_ts_suspend,
.thaw = mms_ts_resume,
.restore = mms_ts_resume,
#endif
};
#endif
static const struct i2c_device_id mms_ts_id[] = {
{MELFAS_TS_NAME, 0},
{}
};
MODULE_DEVICE_TABLE(i2c, mms_ts_id);
static struct i2c_driver mms_ts_driver = {
.probe = mms_ts_probe,
.remove = __devexit_p(mms_ts_remove),
.driver = {
.name = MELFAS_TS_NAME,
#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
.pm = &mms_ts_pm_ops,
#endif
},
.id_table = mms_ts_id,
};
static int __init mms_ts_init(void)
{
return i2c_add_driver(&mms_ts_driver);
}
static void __exit mms_ts_exit(void)
{
i2c_del_driver(&mms_ts_driver);
}
module_init(mms_ts_init);
module_exit(mms_ts_exit);
/* Module information */
MODULE_DESCRIPTION("Touchscreen driver for Melfas MMS-series controllers");
MODULE_LICENSE("GPL");
| kunato/s3-u6 | drivers/input/touchscreen/mms_ts.c | C | gpl-2.0 | 78,817 |
/*
* Fast Userspace Mutexes (which I call "Futexes!").
* (C) Rusty Russell, IBM 2002
*
* Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
* (C) Copyright 2003 Red Hat Inc, All Rights Reserved
*
* Removed page pinning, fix privately mapped COW pages and other cleanups
* (C) Copyright 2003, 2004 Jamie Lokier
*
* Robust futex support started by Ingo Molnar
* (C) Copyright 2006 Red Hat Inc, All Rights Reserved
* Thanks to Thomas Gleixner for suggestions, analysis and fixes.
*
* PI-futex support started by Ingo Molnar and Thomas Gleixner
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* PRIVATE futexes by Eric Dumazet
* Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
*
* Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
* Copyright (C) IBM Corporation, 2009
* Thanks to Thomas Gleixner for conceptual design and careful reviews.
*
* Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
* enough at me, Linus for the original (flawed) idea, Matthew
* Kirkwood for proof-of-concept implementation.
*
* "The futexes are also cursed."
* "But they come in a choice of three flavours!"
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <linux/hugetlb.h>
#include <asm/futex.h>
#include "rtmutex_common.h"
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
/*
* Futex flags used to encode options to functions and preserve them across
* restarts.
*/
#define FLAGS_SHARED 0x01
#define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04
/*
* Priority Inheritance state:
*/
struct futex_pi_state {
/*
* list of 'owned' pi_state instances - these have to be
* cleaned up in do_exit() if the task exits prematurely:
*/
struct list_head list;
/*
* The PI object:
*/
struct rt_mutex pi_mutex;
struct task_struct *owner;
atomic_t refcount;
union futex_key key;
};
/**
* struct futex_q - The hashed futex queue entry, one per waiting task
* @list: priority-sorted list of tasks waiting on this futex
* @task: the task waiting on the futex
* @lock_ptr: the hash bucket lock
* @key: the key the futex is hashed on
* @pi_state: optional priority inheritance state
* @rt_waiter: rt_waiter storage for use with requeue_pi
* @requeue_pi_key: the requeue_pi target futex key
* @bitset: bitset for the optional bitmasked wakeup
*
* We use this hashed waitqueue, instead of a normal wait_queue_t, so
* we can wake only the relevant ones (hashed queues may be shared).
*
* A futex_q has a woken state, just like tasks have TASK_RUNNING.
* It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
* The order of wakeup is always to make the first condition true, then
* the second.
*
* PI futexes are typically woken before they are removed from the hash list via
* the rt_mutex code. See unqueue_me_pi().
*/
struct futex_q {
struct plist_node list;
struct task_struct *task;
spinlock_t *lock_ptr;
union futex_key key;
struct futex_pi_state *pi_state;
struct rt_mutex_waiter *rt_waiter;
union futex_key *requeue_pi_key;
u32 bitset;
};
static const struct futex_q futex_q_init = {
/* list gets initialized in queue_me()*/
.key = FUTEX_KEY_INIT,
.bitset = FUTEX_BITSET_MATCH_ANY
};
/*
* Hash buckets are shared by all the futex_keys that hash to the same
* location. Each key may have multiple futex_q structures, one for each task
* waiting on a futex.
*/
struct futex_hash_bucket {
spinlock_t lock;
struct plist_head chain;
};
static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
/*
* We hash on the keys returned from get_futex_key (see below).
*/
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
u32 hash = jhash2((u32*)&key->both.word,
(sizeof(key->both.word)+sizeof(key->both.ptr))/4,
key->both.offset);
return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
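/*
 * With CONFIG_BASE_SMALL unset, FUTEX_HASHBITS is 8, so futex_queues[]
 * above has 256 buckets and hash_futex() simply keeps the low 8 bits of
 * the jhash2() value. Different futex_keys may therefore collide in the
 * same bucket, which is why each bucket carries its own lock and plist.
 */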
/*
* Return 1 if two futex_keys are equal, 0 otherwise.
*/
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
return (key1 && key2
&& key1->both.word == key2->both.word
&& key1->both.ptr == key2->both.ptr
&& key1->both.offset == key2->both.offset);
}
/*
* Take a reference to the resource addressed by a key.
* Can be called while holding spinlocks.
*
*/
static void get_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr)
return;
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
ihold(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
atomic_inc(&key->private.mm->mm_count);
break;
}
}
/*
* Drop a reference to the resource addressed by a key.
* The hash bucket spinlock must not be held.
*/
static void drop_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr) {
/* If we're here then we tried to put a key we failed to get */
WARN_ON_ONCE(1);
return;
}
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
iput(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
mmdrop(key->private.mm);
break;
}
}
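/*
 * get_futex_key_refs() and drop_futex_key_refs() must stay balanced for
 * every key returned by get_futex_key(): the FUT_OFF_INODE/FUT_OFF_MMSHARED
 * bits encoded in key->both.offset select whether the reference is held on
 * the inode or on the mm; purely private keys hold no extra reference.
 */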
/**
* get_futex_key() - Get parameters which are the keys for a futex
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
* @rw: mapping needs to be read/write (values: VERIFY_READ,
* VERIFY_WRITE)
*
* Return: a negative error code or 0
*
* The key words are stored in *key on success.
*
* For shared mappings, it's (page->index, file_inode(vma->vm_file),
* offset_within_page). For private mappings, it's (uaddr, current->mm).
* We can usually work out the index without swapping in the page.
*
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page, *page_head;
int err, ro = 0;
/*
* The futex address must be "naturally" aligned.
*/
key->both.offset = address % PAGE_SIZE;
if (unlikely((address % sizeof(u32)) != 0))
return -EINVAL;
address -= key->both.offset;
/*
* PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs the
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
if (!fshared) {
if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
return -EFAULT;
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key);
return 0;
}
again:
err = get_user_pages_fast(address, 1, 1, &page);
/*
* If write access is not required (eg. FUTEX_WAIT), try
* and get read-only access.
*/
if (err == -EFAULT && rw == VERIFY_READ) {
err = get_user_pages_fast(address, 1, 0, &page);
ro = 1;
}
if (err < 0)
return err;
else
err = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
page_head = page;
if (unlikely(PageTail(page))) {
put_page(page);
/* serialize against __split_huge_page_splitting() */
local_irq_disable();
if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
page_head = compound_head(page);
/*
* page_head is valid pointer but we must pin
* it before taking the PG_lock and/or
* PG_compound_lock. The moment we re-enable
* irqs __split_huge_page_splitting() can
* return and the head page can be freed from
* under us. We can't take the PG_lock and/or
* PG_compound_lock on a page that could be
* freed from under us.
*/
if (page != page_head) {
get_page(page_head);
put_page(page);
}
local_irq_enable();
} else {
local_irq_enable();
goto again;
}
}
#else
page_head = compound_head(page);
if (page != page_head) {
get_page(page_head);
put_page(page);
}
#endif
lock_page(page_head);
/*
* If page_head->mapping is NULL, then it cannot be a PageAnon
* page; but it might be the ZERO_PAGE or in the gate area or
* in a special mapping (all cases which we are happy to fail);
* or it may have been a good file page when get_user_pages_fast
* found it, but truncated or holepunched or subjected to
* invalidate_complete_page2 before we got the page lock (also
* cases which we are happy to fail). And we hold a reference,
* so refcount care in invalidate_complete_page's remove_mapping
* prevents drop_caches from setting mapping to NULL beneath us.
*
* The case we do have to guard against is when memory pressure made
* shmem_writepage move it from filecache to swapcache beneath us:
* an unlikely race, but we do need to retry for page_head->mapping.
*/
if (!page_head->mapping) {
int shmem_swizzled = PageSwapCache(page_head);
unlock_page(page_head);
put_page(page_head);
if (shmem_swizzled)
goto again;
return -EFAULT;
}
/*
* Private mappings are handled in a simple way.
*
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
* the object not the particular process.
*/
if (PageAnon(page_head)) {
/*
* A RO anonymous page will never change and thus doesn't make
* sense for futex operations.
*/
if (ro) {
err = -EFAULT;
goto out;
}
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
} else {
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.inode = page_head->mapping->host;
key->shared.pgoff = basepage_index(page);
}
get_futex_key_refs(key);
out:
unlock_page(page_head);
put_page(page_head);
return err;
}
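/*
 * Summary of what get_futex_key() above leaves in *key:
 *
 *   private futex:            { current->mm, aligned uaddr, offset }
 *   shared anonymous page:    { current->mm, aligned uaddr, offset | FUT_OFF_MMSHARED }
 *   shared file-backed page:  { inode, basepage_index(page), offset | FUT_OFF_INODE }
 *
 * Two tasks mapping the same shared page therefore end up with the same
 * key even though their virtual addresses differ.
 */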
static inline void put_futex_key(union futex_key *key)
{
drop_futex_key_refs(key);
}
/**
* fault_in_user_writeable() - Fault in user address and verify RW access
* @uaddr: pointer to faulting user space address
*
* Slow path to fixup the fault we just took in the atomic write
* access to @uaddr.
*
* We have no generic implementation of a non-destructive write to the
* user address. We know that we faulted in the atomic pagefault
* disabled section so we can as well avoid the #PF overhead by
* calling get_user_pages() right away.
*/
static int fault_in_user_writeable(u32 __user *uaddr)
{
struct mm_struct *mm = current->mm;
int ret;
down_read(&mm->mmap_sem);
ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
FAULT_FLAG_WRITE);
up_read(&mm->mmap_sem);
return ret < 0 ? ret : 0;
}
/**
* futex_top_waiter() - Return the highest priority waiter on a futex
* @hb: the hash bucket the futex_q's reside in
* @key: the futex key (to distinguish it from other futex futex_q's)
*
* Must be called with the hb lock held.
*/
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
union futex_key *key)
{
struct futex_q *this;
plist_for_each_entry(this, &hb->chain, list) {
if (match_futex(&this->key, key))
return this;
}
return NULL;
}
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
u32 uval, u32 newval)
{
int ret;
pagefault_disable();
ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
pagefault_enable();
return ret;
}
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
int ret;
pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
pagefault_enable();
return ret ? -EFAULT : 0;
}
/*
* PI code:
*/
static int refill_pi_state_cache(void)
{
struct futex_pi_state *pi_state;
if (likely(current->pi_state_cache))
return 0;
pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
if (!pi_state)
return -ENOMEM;
INIT_LIST_HEAD(&pi_state->list);
/* pi_mutex gets initialized later */
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
pi_state->key = FUTEX_KEY_INIT;
current->pi_state_cache = pi_state;
return 0;
}
static struct futex_pi_state * alloc_pi_state(void)
{
struct futex_pi_state *pi_state = current->pi_state_cache;
WARN_ON(!pi_state);
current->pi_state_cache = NULL;
return pi_state;
}
static void free_pi_state(struct futex_pi_state *pi_state)
{
if (!atomic_dec_and_test(&pi_state->refcount))
return;
/*
* If pi_state->owner is NULL, the owner is most probably dying
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
raw_spin_lock_irq(&pi_state->owner->pi_lock);
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
}
if (current->pi_state_cache)
kfree(pi_state);
else {
/*
* pi_state->list is already empty.
* clear pi_state->owner.
* refcount is at 0 - put it back to 1.
*/
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
current->pi_state_cache = pi_state;
}
}
/*
* Look up the task based on what TID userspace gave us.
 * We don't trust it.
*/
static struct task_struct * futex_find_get_task(pid_t pid)
{
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p)
get_task_struct(p);
rcu_read_unlock();
return p;
}
/*
* This task is holding PI mutexes at exit time => bad.
* Kernel cleans up PI-state, but userspace is likely hosed.
* (Robust-futex cleanup is separate and might save the day for userspace.)
*/
void exit_pi_state_list(struct task_struct *curr)
{
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
struct futex_hash_bucket *hb;
union futex_key key = FUTEX_KEY_INIT;
if (!futex_cmpxchg_enabled)
return;
/*
* We are a ZOMBIE and nobody can enqueue itself on
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
raw_spin_lock_irq(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
*/
if (head->next != next) {
spin_unlock(&hb->lock);
continue;
}
WARN_ON(pi_state->owner != curr);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
raw_spin_unlock_irq(&curr->pi_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
spin_unlock(&hb->lock);
raw_spin_lock_irq(&curr->pi_lock);
}
raw_spin_unlock_irq(&curr->pi_lock);
}
/*
* We need to check the following states:
*
* Waiter | pi_state | pi->owner | uTID | uODIED | ?
*
* [1] NULL | --- | --- | 0 | 0/1 | Valid
* [2] NULL | --- | --- | >0 | 0/1 | Valid
*
* [3] Found | NULL | -- | Any | 0/1 | Invalid
*
* [4] Found | Found | NULL | 0 | 1 | Valid
* [5] Found | Found | NULL | >0 | 1 | Invalid
*
* [6] Found | Found | task | 0 | 1 | Valid
*
* [7] Found | Found | NULL | Any | 0 | Invalid
*
* [8] Found | Found | task | ==taskTID | 0/1 | Valid
* [9] Found | Found | task | 0 | 0 | Invalid
* [10] Found | Found | task | !=taskTID | 0/1 | Invalid
*
* [1] Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
*
* [2] Valid, if TID does not belong to a kernel thread. If no matching
* thread is found then it indicates that the owner TID has died.
*
* [3] Invalid. The waiter is queued on a non PI futex
*
* [4] Valid state after exit_robust_list(), which sets the user space
* value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
*
* [5] The user space value got manipulated between exit_robust_list()
* and exit_pi_state_list()
*
* [6] Valid state after exit_pi_state_list() which sets the new owner in
* the pi_state but cannot access the user space value.
*
* [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
*
* [8] Owner and user space value match
*
* [9] There is no transient state which sets the user space TID to 0
* except exit_robust_list(), but this is indicated by the
* FUTEX_OWNER_DIED bit. See [4]
*
* [10] There is no transient state which leaves owner and user space
* TID out of sync.
*/
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
union futex_key *key, struct futex_pi_state **ps)
{
struct futex_pi_state *pi_state = NULL;
struct futex_q *this, *next;
struct plist_head *head;
struct task_struct *p;
pid_t pid = uval & FUTEX_TID_MASK;
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, key)) {
/*
* Sanity check the waiter before increasing
* the refcount and attaching to it.
*/
pi_state = this->pi_state;
/*
* Userspace might have messed up non-PI and
* PI futexes [3]
*/
if (unlikely(!pi_state))
return -EINVAL;
WARN_ON(!atomic_read(&pi_state->refcount));
/*
* Handle the owner died case:
*/
if (uval & FUTEX_OWNER_DIED) {
/*
* exit_pi_state_list sets owner to NULL and
* wakes the topmost waiter. The task which
* acquires the pi_state->rt_mutex will fixup
* owner.
*/
if (!pi_state->owner) {
/*
* No pi state owner, but the user
* space TID is not 0. Inconsistent
* state. [5]
*/
if (pid)
return -EINVAL;
/*
* Take a ref on the state and
* return. [4]
*/
goto out_state;
}
/*
* If TID is 0, then either the dying owner
* has not yet executed exit_pi_state_list()
* or some waiter acquired the rtmutex in the
* pi state, but did not yet fixup the TID in
* user space.
*
* Take a ref on the state and return. [6]
*/
if (!pid)
goto out_state;
} else {
/*
* If the owner died bit is not set,
* then the pi_state must have an
* owner. [7]
*/
if (!pi_state->owner)
return -EINVAL;
}
/*
* Bail out if user space manipulated the
* futex value. If pi state exists then the
* owner TID must be the same as the user
* space TID. [9/10]
*/
if (pid != task_pid_vnr(pi_state->owner))
return -EINVAL;
out_state:
atomic_inc(&pi_state->refcount);
*ps = pi_state;
return 0;
}
}
/*
* We are the first waiter - try to look up the real owner and attach
* the new pi_state to it, but bail out when TID = 0 [1]
*/
if (!pid)
return -ESRCH;
p = futex_find_get_task(pid);
if (!p)
return -ESRCH;
if (!p->mm) {
put_task_struct(p);
return -EPERM;
}
/*
* We need to look at the task state flags to figure out,
* whether the task is exiting. To protect against the do_exit
* change of the task flags, we do this protected by
* p->pi_lock:
*/
raw_spin_lock_irq(&p->pi_lock);
if (unlikely(p->flags & PF_EXITING)) {
/*
* The task is on the way out. When PF_EXITPIDONE is
* set, we know that the task has finished the
* cleanup:
*/
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return ret;
}
/*
* No existing pi state. First waiter. [2]
*/
pi_state = alloc_pi_state();
/*
* Initialize the pi_mutex in locked state and make 'p'
* the owner of it:
*/
rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
/* Store the key for possible exit cleanups: */
pi_state->key = *key;
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &p->pi_state_list);
pi_state->owner = p;
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
*ps = pi_state;
return 0;
}
/**
* futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
* @uaddr: the pi futex user address
* @hb: the pi futex hash bucket
* @key: the futex key associated with uaddr and hb
* @ps: the pi_state pointer where we store the result of the
* lookup
* @task: the task to perform the atomic lock work for. This will
* be "current" except in the case of requeue pi.
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Return:
* 0 - ready to wait;
* 1 - acquired the lock;
* <0 - error
*
* The hb->lock and futex_key refs shall be held by the caller.
*/
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
union futex_key *key,
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
int lock_taken, ret, force_take = 0;
u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry:
ret = lock_taken = 0;
/*
* To avoid races, we attempt to take the lock here again
* (by doing a 0 -> TID atomic cmpxchg), while holding all
* the locks. It will most likely not succeed.
*/
newval = vpid;
if (set_waiters)
newval |= FUTEX_WAITERS;
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
return -EFAULT;
/*
* Detect deadlocks.
*/
if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
return -EDEADLK;
/*
* Surprise - we got the lock, but we do not trust user space at all.
*/
if (unlikely(!curval)) {
/*
* We verify whether there is kernel state for this
* futex. If not, we can safely assume, that the 0 ->
* TID transition is correct. If state exists, we do
* not bother to fixup the user space state as it was
* corrupted already.
*/
return futex_top_waiter(hb, key) ? -EINVAL : 1;
}
uval = curval;
/*
* Set the FUTEX_WAITERS flag, so the owner will know it has someone
* to wake at the next unlock.
*/
newval = curval | FUTEX_WAITERS;
/*
* Should we force take the futex? See below.
*/
if (unlikely(force_take)) {
/*
* Keep the OWNER_DIED and the WAITERS bit and set the
* new TID value.
*/
newval = (curval & ~FUTEX_TID_MASK) | vpid;
force_take = 0;
lock_taken = 1;
}
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
if (unlikely(curval != uval))
goto retry;
/*
* We took the lock due to forced take over.
*/
if (unlikely(lock_taken))
return 1;
/*
	 * We don't have the lock. Look up the PI state (or create it if
* we are the first waiter):
*/
ret = lookup_pi_state(uval, hb, key, ps);
if (unlikely(ret)) {
switch (ret) {
case -ESRCH:
/*
* We failed to find an owner for this
* futex. So we have no pi_state to block
* on. This can happen in two cases:
*
* 1) The owner died
* 2) A stale FUTEX_WAITERS bit
*
* Re-read the futex value.
*/
if (get_futex_value_locked(&curval, uaddr))
return -EFAULT;
/*
* If the owner died or we have a stale
* WAITERS bit the owner TID in the user space
* futex is 0.
*/
if (!(curval & FUTEX_TID_MASK)) {
force_take = 1;
goto retry;
}
default:
break;
}
}
return ret;
}
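/*
 * For reference, the user space value of a PI futex as managed by
 * futex_lock_pi_atomic() above encodes:
 *
 *   0                     - unlocked
 *   TID                   - locked, no waiters known to the kernel
 *   TID | FUTEX_WAITERS   - locked, kernel has waiters queued
 *
 * with FUTEX_OWNER_DIED optionally set by robust-futex cleanup; TID is the
 * owner's pid as seen from its pid namespace (task_pid_vnr()).
 */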
/**
* __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be NULL and must be held by the caller.
*/
static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
|| WARN_ON(plist_node_empty(&q->list)))
return;
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
}
/*
* The hash bucket lock must be held when this is called.
* Afterwards, the futex_q must not be accessed.
*/
static void wake_futex(struct futex_q *q)
{
struct task_struct *p = q->task;
if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
return;
/*
* We set q->lock_ptr = NULL _before_ we wake up the task. If
* a non-futex wake up happens on another CPU then the task
* might exit and p would dereference a non-existing task
* struct. Prevent this by holding a reference on p across the
* wake up.
*/
get_task_struct(p);
__unqueue_futex(q);
/*
* The waiting task can free the futex_q as soon as
* q->lock_ptr = NULL is written, without taking any locks. A
* memory barrier is required here to prevent the following
* store to lock_ptr from getting ahead of the plist_del.
*/
smp_wmb();
q->lock_ptr = NULL;
wake_up_state(p, TASK_NORMAL);
put_task_struct(p);
}
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
int ret = 0;
if (!pi_state)
return -EINVAL;
/*
* If current does not own the pi_state then the futex is
* inconsistent and user space fiddled with the futex value.
*/
if (pi_state->owner != current)
return -EINVAL;
raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
* It is possible that the next waiter (the one that brought
* this owner to the kernel) timed out and is no longer
* waiting on the lock.
*/
if (!new_owner)
new_owner = this->task;
/*
* We pass it to the next owner. The WAITERS bit is always
* kept enabled while there is PI state around. We cleanup the
* owner died bit, because we are the owner.
*/
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
ret = -EFAULT;
else if (curval != uval)
ret = -EINVAL;
if (ret) {
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
}
raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
raw_spin_lock_irq(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
raw_spin_unlock_irq(&new_owner->pi_lock);
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
return 0;
}
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
u32 uninitialized_var(oldval);
/*
* There is no waiter, so we unlock the futex. The owner died
	 * bit does not need to be preserved here. We are the owner:
*/
if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
return -EFAULT;
if (oldval != uval)
return -EAGAIN;
return 0;
}
/*
* Express the locking dependencies for lockdep:
*/
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
if (hb1 <= hb2) {
spin_lock(&hb1->lock);
if (hb1 < hb2)
spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
} else { /* hb1 > hb2 */
spin_lock(&hb2->lock);
spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
}
}
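/*
 * Taking the bucket locks in pointer order (hb1 before hb2 when hb1 < hb2)
 * gives every caller the same global ordering and thus avoids ABBA
 * deadlocks between two futexes that hash to different buckets; when both
 * keys hash to the same bucket the lock is taken only once.
 */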
static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
spin_unlock(&hb1->lock);
if (hb1 != hb2)
spin_unlock(&hb2->lock);
}
/*
* Wake up waiters matching bitset queued on this futex (uaddr).
*/
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
int ret;
if (!bitset)
return -EINVAL;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
spin_lock(&hb->lock);
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
break;
}
/* Check if one of the bits is set in both bitsets */
if (!(this->bitset & bitset))
continue;
wake_futex(this);
if (++ret >= nr_wake)
break;
}
}
spin_unlock(&hb->lock);
put_futex_key(&key);
out:
return ret;
}
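/*
 * Userspace view (illustrative sketch only, not kernel code): futex_wake()
 * above is what eventually services a FUTEX_WAKE/FUTEX_WAKE_BITSET call,
 * e.g.:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <limits.h>
 *
 *	static int futex_wake_all(unsigned int *uaddr)
 *	{
 *		// val3 is the bitset; FUTEX_BITSET_MATCH_ANY wakes any waiter
 *		return syscall(SYS_futex, uaddr,
 *			       FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG,
 *			       INT_MAX, NULL, NULL, FUTEX_BITSET_MATCH_ANY);
 *	}
 *
 * FUTEX_PRIVATE_FLAG corresponds to the !FLAGS_SHARED fast path in
 * get_futex_key().
 */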
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
int nr_wake, int nr_wake2, int op)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head;
struct futex_q *this, *next;
int ret, op_ret;
retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
double_lock_hb(hb1, hb2);
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
double_unlock_hb(hb1, hb2);
#ifndef CONFIG_MMU
/*
* we don't get EFAULT from MMU faults if we don't have an MMU,
* but we might get them from range checking
*/
ret = op_ret;
goto out_put_keys;
#endif
if (unlikely(op_ret != -EFAULT)) {
ret = op_ret;
goto out_put_keys;
}
ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
head = &hb1->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key1)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
goto out_unlock;
}
wake_futex(this);
if (++ret >= nr_wake)
break;
}
}
if (op_ret > 0) {
head = &hb2->chain;
op_ret = 0;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key2)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
goto out_unlock;
}
wake_futex(this);
if (++op_ret >= nr_wake2)
break;
}
}
ret += op_ret;
}
out_unlock:
double_unlock_hb(hb1, hb2);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
return ret;
}
/**
* requeue_futex() - Requeue a futex_q from one hb to another
* @q: the futex_q to requeue
* @hb1: the source hash_bucket
* @hb2: the target hash_bucket
* @key2: the new key for the requeued futex_q
*/
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
struct futex_hash_bucket *hb2, union futex_key *key2)
{
/*
* If key1 and key2 hash to the same bucket, no need to
* requeue.
*/
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
q->key = *key2;
}
/**
* requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
* @q: the futex_q
* @key: the key of the requeue target futex
* @hb: the hash_bucket of the requeue target futex
*
* During futex_requeue, with requeue_pi=1, it is possible to acquire the
* target futex if it is uncontended or via a lock steal. Set the futex_q key
* to the requeue target futex so the waiter can detect the wakeup on the right
* futex, but remove it from the hb and NULL the rt_waiter so it can detect
* atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
* to protect access to the pi_state to fixup the owner later. Must be called
* with both q->lock_ptr and hb->lock held.
*/
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
struct futex_hash_bucket *hb)
{
get_futex_key_refs(key);
q->key = *key;
__unqueue_futex(q);
WARN_ON(!q->rt_waiter);
q->rt_waiter = NULL;
q->lock_ptr = &hb->lock;
wake_up_state(q->task, TASK_NORMAL);
}
/**
* futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
* @pifutex: the user address of the to futex
* @hb1: the from futex hash bucket, must be locked by the caller
* @hb2: the to futex hash bucket, must be locked by the caller
* @key1: the from futex key
* @key2: the to futex key
* @ps: address to store the pi_state pointer
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Try and get the lock on behalf of the top waiter if we can do it atomically.
* Wake the top waiter if we succeed. If the caller specified set_waiters,
* then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
* hb1 and hb2 must be held by the caller.
*
* Return:
* 0 - failed to acquire the lock atomically;
* >0 - acquired the lock, return value is vpid of the top_waiter
* <0 - error
*/
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
struct futex_hash_bucket *hb1,
struct futex_hash_bucket *hb2,
union futex_key *key1, union futex_key *key2,
struct futex_pi_state **ps, int set_waiters)
{
struct futex_q *top_waiter = NULL;
u32 curval;
int ret, vpid;
if (get_futex_value_locked(&curval, pifutex))
return -EFAULT;
/*
* Find the top_waiter and determine if there are additional waiters.
* If the caller intends to requeue more than 1 waiter to pifutex,
* force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
* as we have means to handle the possible fault. If not, don't set
 * the bit unnecessarily as it will force the subsequent unlock to enter
* the kernel.
*/
top_waiter = futex_top_waiter(hb1, key1);
/* There are no waiters, nothing for us to do. */
if (!top_waiter)
return 0;
/* Ensure we requeue to the expected futex. */
if (!match_futex(top_waiter->requeue_pi_key, key2))
return -EINVAL;
/*
* Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
* the contended case or if set_waiters is 1. The pi_state is returned
* in ps in contended cases.
*/
vpid = task_pid_vnr(top_waiter->task);
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
set_waiters);
if (ret == 1) {
requeue_pi_wake_futex(top_waiter, key2, hb2);
return vpid;
}
return ret;
}
/**
* futex_requeue() - Requeue waiters from uaddr1 to uaddr2
* @uaddr1: source futex user address
* @flags: futex flags (FLAGS_SHARED, etc.)
* @uaddr2: target futex user address
* @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
* @nr_requeue: number of waiters to requeue (0-INT_MAX)
* @cmpval: @uaddr1 expected value (or %NULL)
* @requeue_pi: if we are attempting to requeue from a non-pi futex to a
* pi futex (pi to pi requeue is not supported)
*
* Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
* uaddr2 atomically on behalf of the top waiter.
*
* Return:
* >=0 - on success, the number of tasks requeued or woken;
* <0 - on error
*/
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
u32 __user *uaddr2, int nr_wake, int nr_requeue,
u32 *cmpval, int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head1;
struct futex_q *this, *next;
u32 curval2;
if (requeue_pi) {
/*
* Requeue PI only works on two distinct uaddrs. This
* check is only valid for private futexes. See below.
*/
if (uaddr1 == uaddr2)
return -EINVAL;
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
if (refill_pi_state_cache())
return -ENOMEM;
/*
* requeue_pi must wake as many tasks as it can, up to nr_wake
* + nr_requeue, since it acquires the rt_mutex prior to
* returning to userspace, so as to not leave the rt_mutex with
* waiters and no owner. However, second and third wake-ups
* cannot be predicted as they involve race conditions with the
* first wake and a fault while looking up the pi_state. Both
* pthread_cond_signal() and pthread_cond_broadcast() should
* use nr_wake=1.
*/
if (nr_wake != 1)
return -EINVAL;
}
retry:
if (pi_state != NULL) {
/*
* We will have to lookup the pi_state again, so free this one
* to keep the accounting correct.
*/
free_pi_state(pi_state);
pi_state = NULL;
}
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
/*
* The check above which compares uaddrs is not sufficient for
* shared futexes. We need to compare the keys:
*/
if (requeue_pi && match_futex(&key1, &key2)) {
ret = -EINVAL;
goto out_put_keys;
}
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
double_unlock_hb(hb1, hb2);
ret = get_user(curval, uaddr1);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
* reference to it. If the lock was taken, ret contains the
* vpid of the top waiter task.
*/
if (ret > 0) {
WARN_ON(pi_state);
drop_count++;
task_count++;
/*
* If we acquired the lock, then the user
* space value of uaddr2 should be vpid. It
* cannot be changed by the top waiter as it
* is blocked on hb2 lock if it tries to do
* so. If something fiddled with it behind our
* back the pi state lookup might unearth
* it. So we rather use the known value than
* rereading and handing potential crap to
* lookup_pi_state.
*/
ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
}
switch (ret) {
case 0:
break;
case -EFAULT:
double_unlock_hb(hb1, hb2);
put_futex_key(&key2);
put_futex_key(&key1);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
case -EAGAIN:
/* The owner was exiting, try again. */
double_unlock_hb(hb1, hb2);
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
default:
goto out_unlock;
}
}
head1 = &hb1->chain;
plist_for_each_entry_safe(this, next, head1, list) {
if (task_count - nr_wake >= nr_requeue)
break;
if (!match_futex(&this->key, &key1))
continue;
/*
* FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*
* We should never be requeueing a futex_q with a pi_state,
* which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
(!requeue_pi && this->rt_waiter) ||
this->pi_state) {
ret = -EINVAL;
break;
}
/*
* Wake nr_wake waiters. For requeue_pi, if we acquired the
* lock, we already woke the top_waiter. If not, it will be
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
wake_futex(this);
continue;
}
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
ret = -EINVAL;
break;
}
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
*/
if (requeue_pi) {
/* Prepare the waiter to take the rt_mutex. */
atomic_inc(&pi_state->refcount);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task, 1);
if (ret == 1) {
/* We got the lock. */
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
free_pi_state(pi_state);
goto out_unlock;
}
}
requeue_futex(this, hb1, hb2, &key2);
drop_count++;
}
out_unlock:
double_unlock_hb(hb1, hb2);
/*
* drop_futex_key_refs() must be called outside the spinlocks. During
* the requeue we moved futex_q's from the hash bucket at key1 to the
* one at key2 and updated their key pointer. We no longer need to
* hold the references to key1.
*/
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
if (pi_state != NULL)
free_pi_state(pi_state);
return ret ? ret : task_count;
}
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
__acquires(&hb->lock)
{
struct futex_hash_bucket *hb;
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
return hb;
}
static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
spin_unlock(&hb->lock);
}
/**
* queue_me() - Enqueue the futex_q on the futex_hash_bucket
* @q: The futex_q to enqueue
* @hb: The destination hash bucket
*
* The hb->lock must be held by the caller, and is released here. A call to
* queue_me() is typically paired with exactly one call to unqueue_me(). The
* exceptions involve the PI related operations, which may use unqueue_me_pi()
* or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
* an example).
*/
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
int prio;
/*
* The priority used to register this element is
* - either the real thread-priority for the real-time threads
* (i.e. threads with a priority lower than MAX_RT_PRIO)
* - or MAX_RT_PRIO for non-RT threads.
* Thus, all RT-threads are woken first in priority order, and
* the others are woken last, in FIFO order.
*/
prio = min(current->normal_prio, MAX_RT_PRIO);
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
q->task = current;
spin_unlock(&hb->lock);
}
/**
* unqueue_me() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
* be paired with exactly one earlier call to queue_me().
*
* Return:
 * 1 - if the futex_q was still queued (and we unqueued it);
* 0 - if the futex_q was already removed by the waking thread
*/
static int unqueue_me(struct futex_q *q)
{
spinlock_t *lock_ptr;
int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
retry:
lock_ptr = q->lock_ptr;
barrier();
if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
* spin_lock(), causing us to take the wrong lock. This
* corrects the race condition.
*
* Reasoning goes like this: if we have the wrong lock,
* q->lock_ptr must have changed (maybe several times)
* between reading it and the spin_lock(). It can
* change again after the spin_lock() but only if it was
* already changed before the spin_lock(). It cannot,
* however, change back to the original value. Therefore
* we can detect whether we acquired the correct lock.
*/
if (unlikely(lock_ptr != q->lock_ptr)) {
spin_unlock(lock_ptr);
goto retry;
}
__unqueue_futex(q);
BUG_ON(q->pi_state);
spin_unlock(lock_ptr);
ret = 1;
}
drop_futex_key_refs(&q->key);
return ret;
}
/*
* PI futexes can not be requeued and must remove themself from the
* hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
* and dropped here.
*/
static void unqueue_me_pi(struct futex_q *q)
__releases(q->lock_ptr)
{
__unqueue_futex(q);
BUG_ON(!q->pi_state);
free_pi_state(q->pi_state);
q->pi_state = NULL;
spin_unlock(q->lock_ptr);
}
/*
* Fixup the pi_state owner with the new owner.
*
* Must be called with hash bucket lock held and mm->sem held for non
* private futexes.
*/
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
struct task_struct *newowner)
{
u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
struct task_struct *oldowner = pi_state->owner;
u32 uval, uninitialized_var(curval), newval;
int ret;
/* Owner died? */
if (!pi_state->owner)
newtid |= FUTEX_OWNER_DIED;
/*
* We are here either because we stole the rtmutex from the
* previous highest priority waiter or we are the highest priority
* waiter but failed to get the rtmutex the first time.
* We have to replace the newowner TID in the user space variable.
* This must be atomic as we have to preserve the owner died bit here.
*
* Note: We write the user space value _before_ changing the pi_state
* because we can fault here. Imagine swapped out pages or a fork
* that marked all the anonymous memory readonly for cow.
*
* Modifying pi_state _before_ the user space value would
* leave the pi_state in an inconsistent state when we fault
* here, because we need to drop the hash bucket lock to
* handle the fault. This might be observed in the PID check
* in lookup_pi_state.
*/
retry:
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;
while (1) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
goto handle_fault;
if (curval == uval)
break;
uval = curval;
}
/*
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
if (pi_state->owner != NULL) {
raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
}
pi_state->owner = newowner;
raw_spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
raw_spin_unlock_irq(&newowner->pi_lock);
return 0;
/*
* To handle the page fault we need to drop the hash bucket
* lock here. That gives the other task (either the highest priority
* waiter itself or the task which stole the rtmutex) the
* chance to try the fixup of the pi_state. So once we are
* back from handling the fault we need to check the pi_state
* after reacquiring the hash bucket lock and before trying to
* do another fixup. When the fixup has been done already we
* simply return.
*/
handle_fault:
spin_unlock(q->lock_ptr);
ret = fault_in_user_writeable(uaddr);
spin_lock(q->lock_ptr);
/*
* Check if someone else fixed it for us:
*/
if (pi_state->owner != oldowner)
return 0;
if (ret)
return ret;
goto retry;
}
static long futex_wait_restart(struct restart_block *restart);
/**
* fixup_owner() - Post lock pi_state and corner case management
* @uaddr: user address of the futex
* @q: futex_q (contains pi_state and access to the rt_mutex)
* @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
*
* After attempting to lock an rt_mutex, this function is called to cleanup
* the pi_state owner as well as handle race conditions that may allow us to
* acquire the lock. Must be called with the hb lock held.
*
* Return:
* 1 - success, lock taken;
* 0 - success, lock not taken;
* <0 - on error (-EFAULT)
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
struct task_struct *owner;
int ret = 0;
if (locked) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case:
*/
if (q->pi_state->owner != current)
ret = fixup_pi_state_owner(uaddr, q, current);
goto out;
}
/*
* Catch the rare case, where the lock was released when we were on the
* way back before we locked the hash bucket.
*/
if (q->pi_state->owner == current) {
/*
* Try to get the rt_mutex now. This might fail as some other
* task acquired the rt_mutex after we removed ourself from the
* rt_mutex waiters list.
*/
if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
locked = 1;
goto out;
}
/*
* pi_state is incorrect, some other task did a lock steal and
* we returned due to timeout or signal without taking the
* rt_mutex. Too late.
*/
raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
owner = rt_mutex_owner(&q->pi_state->pi_mutex);
if (!owner)
owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
ret = fixup_pi_state_owner(uaddr, q, owner);
goto out;
}
/*
* Paranoia check. If we did not take the lock, then we should not be
* the owner of the rt_mutex.
*/
if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
"pi-state %p\n", ret,
q->pi_state->pi_mutex.owner,
q->pi_state->owner);
out:
return ret ? ret : locked;
}
/**
* futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
* @hb: the futex hash bucket, must be locked by the caller
* @q: the futex_q to queue up on
* @timeout: the prepared hrtimer_sleeper, or null for no timeout
*/
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
struct hrtimer_sleeper *timeout)
{
/*
* The task state is guaranteed to be set before another task can
* wake it. set_current_state() is implemented using set_mb() and
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
*/
set_current_state(TASK_INTERRUPTIBLE);
queue_me(q, hb);
/* Arm the timer */
if (timeout) {
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
if (!hrtimer_active(&timeout->timer))
timeout->task = NULL;
}
/*
* If we have been removed from the hash list, then another task
* has tried to wake us, and we can skip the call to schedule().
*/
if (likely(!plist_node_empty(&q->list))) {
/*
* If the timer has already expired, current will already be
* flagged for rescheduling. Only call schedule if there
* is no timeout, or if it has yet to expire.
*/
if (!timeout || timeout->task)
freezable_schedule();
}
__set_current_state(TASK_RUNNING);
}
/**
* futex_wait_setup() - Prepare to wait on a futex
* @uaddr: the futex userspace address
* @val: the expected value
* @flags: futex flags (FLAGS_SHARED, etc.)
* @q: the associated futex_q
* @hb: storage for hash_bucket pointer to be returned to caller
*
* Setup the futex_q and locate the hash_bucket. Get the futex value and
* compare it with the expected value. Handle atomic faults internally.
* Return with the hb lock held and a q.key reference on success, and unlocked
* with no q.key reference on failure.
*
* Return:
* 0 - uaddr contains val and hb has been locked;
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
*/
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
struct futex_q *q, struct futex_hash_bucket **hb)
{
u32 uval;
int ret;
/*
* Access the page AFTER the hash-bucket is locked.
* Order is important:
*
* Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
* Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
*
* The basic logical guarantee of a futex is that it blocks ONLY
* if cond(var) is known to be true at the time of blocking, for
* any cond. If we locked the hash-bucket after testing *uaddr, that
* would open a race condition where we could block indefinitely with
* cond(var) false, which would violate the guarantee.
*
* On the other hand, we insert q and release the hash-bucket only
* after testing *uaddr. This guarantees that futex_wait() will NOT
* absorb a wakeup if *uaddr does not match the desired values
* while the syscall executes.
*/
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
if (unlikely(ret != 0))
return ret;
retry_private:
*hb = queue_lock(q);
ret = get_futex_value_locked(&uval, uaddr);
if (ret) {
queue_unlock(q, *hb);
ret = get_user(uval, uaddr);
if (ret)
goto out;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&q->key);
goto retry;
}
if (uval != val) {
queue_unlock(q, *hb);
ret = -EWOULDBLOCK;
}
out:
if (ret)
put_futex_key(&q->key);
return ret;
}
static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
ktime_t *abs_time, u32 bitset)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct restart_block *restart;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int ret;
if (!bitset)
return -EINVAL;
q.bitset = bitset;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
retry:
/*
* Prepare to wait on uaddr. On success, holds hb lock and increments
* q.key refs.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out;
/* queue_me and wait for wakeup, timeout, or a signal. */
futex_wait_queue_me(hb, &q, to);
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
/* unqueue_me() drops q.key ref */
if (!unqueue_me(&q))
goto out;
ret = -ETIMEDOUT;
if (to && !to->task)
goto out;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
if (!signal_pending(current))
goto retry;
ret = -ERESTARTSYS;
if (!abs_time)
goto out;
	restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
restart->futex.uaddr = uaddr;
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
ret = -ERESTART_RESTARTBLOCK;
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
static long futex_wait_restart(struct restart_block *restart)
{
u32 __user *uaddr = restart->futex.uaddr;
ktime_t t, *tp = NULL;
if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
t.tv64 = restart->futex.time;
tp = &t;
}
restart->fn = do_no_restart_syscall;
return (long)futex_wait(uaddr, restart->futex.flags,
restart->futex.val, tp, restart->futex.bitset);
}
/*
* Userspace tried a 0 -> TID atomic transition of the futex value
* and failed. The kernel side here does the whole locking operation:
* if there are waiters then it will block, it does PI, etc. (Due to
* races the kernel might see a 0 value of the futex too.)
*/
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int res, ret;
if (refill_pi_state_cache())
return -ENOMEM;
if (time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires(&to->timer, *time);
}
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
retry_private:
hb = queue_lock(&q);
ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
if (unlikely(ret)) {
switch (ret) {
case 1:
/* We got the lock. */
ret = 0;
goto out_unlock_put_key;
case -EFAULT:
goto uaddr_faulted;
case -EAGAIN:
/*
* Task is exiting and we just wait for the
* exit to complete.
*/
queue_unlock(&q, hb);
put_futex_key(&q.key);
cond_resched();
goto retry;
default:
goto out_unlock_put_key;
}
}
/*
* Only actually queue now that the atomic ops are done:
*/
queue_me(&q, hb);
WARN_ON(!q.pi_state);
/*
* Block on the PI mutex:
*/
if (!trylock)
ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
else {
ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
/* Fixup the trylock return value: */
ret = ret ? 0 : -EWOULDBLOCK;
}
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr, &q, !ret);
/*
	 * If fixup_owner() returned an error, propagate that. If it acquired
* the lock, clear our -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/*
* If fixup_owner() faulted and was unable to handle the fault, unlock
* it and return the fault to userspace.
*/
if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
rt_mutex_unlock(&q.pi_state->pi_mutex);
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
goto out_put_key;
out_unlock_put_key:
queue_unlock(&q, hb);
out_put_key:
put_futex_key(&q.key);
out:
if (to)
destroy_hrtimer_on_stack(&to->timer);
return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
queue_unlock(&q, hb);
ret = fault_in_user_writeable(uaddr);
if (ret)
goto out_put_key;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&q.key);
goto retry;
}
/*
* Userspace attempted a TID -> 0 atomic transition, and failed.
* This is the in-kernel slowpath: we look up the PI state (if any),
* and do the rt-mutex unlock.
*/
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT;
u32 uval, vpid = task_pid_vnr(current);
int ret;
retry:
if (get_user(uval, uaddr))
return -EFAULT;
/*
* We release only a lock we actually own:
*/
if ((uval & FUTEX_TID_MASK) != vpid)
return -EPERM;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
spin_lock(&hb->lock);
/*
* To avoid races, try to do the TID -> 0 atomic transition
* again. If it succeeds then we can return without waking
* anyone else up. We only try this if neither the waiters nor
* the owner died bit are set.
*/
if (!(uval & ~FUTEX_TID_MASK) &&
cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
goto pi_faulted;
/*
* Rare case: we managed to release the lock atomically,
* no need to wake anyone else up:
*/
if (unlikely(uval == vpid))
goto out_unlock;
/*
* Ok, other tasks may need to be woken up - check waiters
* and do the wakeup if necessary:
*/
head = &hb->chain;
plist_for_each_entry_safe(this, next, head, list) {
if (!match_futex (&this->key, &key))
continue;
ret = wake_futex_pi(uaddr, uval, this);
/*
* The atomic access to the futex value
* generated a pagefault, so retry the
* user-access and the wakeup:
*/
if (ret == -EFAULT)
goto pi_faulted;
goto out_unlock;
}
/*
* No waiters - kernel unlocks the futex:
*/
ret = unlock_futex_pi(uaddr, uval);
if (ret == -EFAULT)
goto pi_faulted;
out_unlock:
spin_unlock(&hb->lock);
put_futex_key(&key);
out:
return ret;
pi_faulted:
spin_unlock(&hb->lock);
put_futex_key(&key);
ret = fault_in_user_writeable(uaddr);
if (!ret)
goto retry;
return ret;
}
/**
* handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
* @hb: the hash_bucket futex_q was original enqueued on
* @q: the futex_q woken while waiting to be requeued
* @key2: the futex_key of the requeue target futex
* @timeout: the timeout associated with the wait (NULL if none)
*
* Detect if the task was woken on the initial futex as opposed to the requeue
* target futex. If so, determine if it was a timeout or a signal that caused
* the wakeup and return the appropriate error code to the caller. Must be
* called with the hb lock held.
*
* Return:
* 0 = no early wakeup detected;
* <0 = -ETIMEDOUT or -ERESTARTNOINTR
*/
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
struct futex_q *q, union futex_key *key2,
struct hrtimer_sleeper *timeout)
{
int ret = 0;
/*
* With the hb lock held, we avoid races while we process the wakeup.
* We only need to hold hb (and not hb2) to ensure atomicity as the
* wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
* It can't be requeued from uaddr2 to something else since we don't
* support a PI aware source futex for requeue.
*/
if (!match_futex(&q->key, key2)) {
WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
/*
* We were woken prior to requeue by a timeout or a signal.
* Unqueue the futex_q and determine which it was.
*/
plist_del(&q->list, &hb->chain);
/* Handle spurious wakeups gracefully */
ret = -EWOULDBLOCK;
if (timeout && !timeout->task)
ret = -ETIMEDOUT;
else if (signal_pending(current))
ret = -ERESTARTNOINTR;
}
return ret;
}
/**
* futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
* @uaddr: the futex we initially wait on (non-pi)
* @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
* the same type, no requeueing from private to shared, etc.
* @val: the expected value of uaddr
* @abs_time: absolute timeout
* @bitset: 32 bit wakeup bitset set by userspace, defaults to all
* @uaddr2: the pi futex we will take prior to returning to user-space
*
* The caller will wait on uaddr and will be requeued by futex_requeue() to
* uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
* on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
* userspace. This ensures the rt_mutex maintains an owner when it has waiters;
* without one, the pi logic would not know which task to boost/deboost, if
* there was a need to.
*
* We call schedule in futex_wait_queue_me() when we enqueue and return there
* via the following--
* 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
* 2) wakeup on uaddr2 after a requeue
* 3) signal
* 4) timeout
*
* If 3, cleanup and return -ERESTARTNOINTR.
*
* If 2, we may then block on trying to take the rt_mutex and return via:
* 5) successful lock
* 6) signal
* 7) timeout
* 8) other lock acquisition failure
*
* If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
*
* If 4 or 7, we cleanup and return with -ETIMEDOUT.
*
* Return:
* 0 - On success;
* <0 - On error
*/
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 val, ktime_t *abs_time, u32 bitset,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
if (uaddr == uaddr2)
return -EINVAL;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/*
* Prepare to wait on uaddr. On success, increments q.key (key1) ref
* count.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out_key2;
/*
* The check above which compares uaddrs is not sufficient for
* shared futexes. We need to compare the keys:
*/
if (match_futex(&q.key, &key2)) {
ret = -EINVAL;
goto out_put_keys;
}
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code. The
* futex_requeue dropped our key1 reference and incremented our key2
* reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
spin_unlock(q.lock_ptr);
}
} else {
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, &q, !ret);
/*
		 * If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
/*
* If fixup_pi_state_owner() faulted and was unable to handle the
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(&q.key);
out_key2:
put_futex_key(&key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
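/*
 * Illustrative sketch (not kernel code): roughly how a userspace condvar
 * built on a PI mutex is expected to pair the two requeue-PI ops handled
 * above. cond_word, mutex_word and seq are hypothetical userspace futex
 * words/values; error handling and library details are omitted.
 *
 *	// waiter (pthread_cond_wait() style): the kernel requeues us onto
 *	// mutex_word and acquires it on our behalf before we return
 *	syscall(SYS_futex, &cond_word, FUTEX_WAIT_REQUEUE_PI, seq,
 *		NULL, &mutex_word, 0);
 *
 *	// waker (broadcast style): wake one waiter, requeue the rest onto
 *	// the PI mutex; nr_requeue travels in the utime argument slot
 *	syscall(SYS_futex, &cond_word, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)(unsigned long)INT_MAX, &mutex_word, seq);
 */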
/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*
* Implementation: user-space maintains a per-thread list of locks it
* is holding. Upon do_exit(), the kernel carefully walks this list,
* and marks all locks that are owned by this thread with the
* FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
* always manipulated with the lock held, so the list is private and
* per-thread. Userspace also maintains a per-thread 'list_op_pending'
* field, to allow the kernel to clean up if the thread dies after
* acquiring the lock, but just before it could have added itself to
* the list. There can only be one such pending lock.
*/
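/*
 * Illustrative sketch (not kernel code): a minimal example of how a
 * userspace locking library might register the list described above.
 * "struct my_mutex" and "futex_word" are hypothetical names; the head
 * layout follows struct robust_list_head from <linux/futex.h>.
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next	     = &head.list;	// empty circular list
 *	head.futex_offset    = offsetof(struct my_mutex, futex_word);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Each acquired lock is then linked into head.list, and list_op_pending is
 * pointed at a lock just before taking it, so exit_robust_list() below can
 * still find it if the thread dies in that window.
 */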
/**
* sys_set_robust_list() - Set the robust-futex list head of a task
* @head: pointer to the list-head
* @len: length of the list-head, as userspace expects
*/
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
size_t, len)
{
if (!futex_cmpxchg_enabled)
return -ENOSYS;
/*
* The kernel knows only one size for now:
*/
if (unlikely(len != sizeof(*head)))
return -EINVAL;
current->robust_list = head;
return 0;
}
/**
* sys_get_robust_list() - Get the robust-futex list head of a task
* @pid: pid of the process [zero for current task]
* @head_ptr: pointer to a list-head pointer, the kernel fills it in
* @len_ptr: pointer to a length field, the kernel fills in the header size
*/
SYSCALL_DEFINE3(get_robust_list, int, pid,
struct robust_list_head __user * __user *, head_ptr,
size_t __user *, len_ptr)
{
struct robust_list_head __user *head;
unsigned long ret;
struct task_struct *p;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
rcu_read_lock();
ret = -ESRCH;
if (!pid)
p = current;
else {
p = find_task_by_vpid(pid);
if (!p)
goto err_unlock;
}
ret = -EPERM;
if (!ptrace_may_access(p, PTRACE_MODE_READ))
goto err_unlock;
head = p->robust_list;
rcu_read_unlock();
if (put_user(sizeof(*head), len_ptr))
return -EFAULT;
return put_user(head, head_ptr);
err_unlock:
rcu_read_unlock();
return ret;
}
/*
* Process a futex-list entry, check whether it's owned by the
* dying task, and do notification if so:
*/
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
u32 uval, uninitialized_var(nval), mval;
retry:
if (get_user(uval, uaddr))
return -1;
if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
/*
* Ok, this dying thread is truly holding a futex
* of interest. Set the OWNER_DIED bit atomically
* via cmpxchg, and if the value had FUTEX_WAITERS
* set, wake up a waiter (if any). (We have to do a
* futex_wake() even if OWNER_DIED is already set -
* to handle the rare but possible case of recursive
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
/*
* We are not holding a lock here, but we want to have
* the pagefault_disable/enable() protection because
* we want to handle the fault gracefully. If the
* access fails we try to fault in the futex with R/W
* verification via get_user_pages. get_user() above
* does not guarantee R/W access. If that fails we
* give up and leave the futex locked.
*/
if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
if (fault_in_user_writeable(uaddr))
return -1;
goto retry;
}
if (nval != uval)
goto retry;
/*
* Wake robust non-PI futexes here. The wakeup of
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS))
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
}
return 0;
}
/*
* Fetch a robust-list pointer. Bit 0 signals PI futexes:
*/
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
unsigned int *pi)
{
unsigned long uentry;
if (get_user(uentry, (unsigned long __user *)head))
return -EFAULT;
*entry = (void __user *)(uentry & ~1UL);
*pi = uentry & 1;
return 0;
}
/*
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
* We silently return on any sign of list-walking problem.
*/
void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int uninitialized_var(next_pi);
unsigned long futex_offset;
int rc;
if (!futex_cmpxchg_enabled)
return;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
if (fetch_robust_entry(&entry, &head->list.next, &pi))
return;
/*
* Fetch the relative futex offset:
*/
if (get_user(futex_offset, &head->futex_offset))
return;
/*
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
return;
next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
/*
* Fetch the next entry in the list before calling
* handle_futex_death:
*/
rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
/*
* A pending lock might already be on the list, so
* don't process it twice:
*/
if (entry != pending)
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi))
return;
if (rc)
return;
entry = next_entry;
pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
if (!--limit)
break;
cond_resched();
}
if (pending)
handle_futex_death((void __user *)pending + futex_offset,
curr, pip);
}
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
int cmd = op & FUTEX_CMD_MASK;
unsigned int flags = 0;
if (!(op & FUTEX_PRIVATE_FLAG))
flags |= FLAGS_SHARED;
if (op & FUTEX_CLOCK_REALTIME) {
flags |= FLAGS_CLOCKRT;
if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_LOCK_PI:
case FUTEX_UNLOCK_PI:
case FUTEX_TRYLOCK_PI:
case FUTEX_WAIT_REQUEUE_PI:
case FUTEX_CMP_REQUEUE_PI:
if (!futex_cmpxchg_enabled)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAIT_BITSET:
return futex_wait(uaddr, flags, val, timeout, val3);
case FUTEX_WAKE:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAKE_BITSET:
return futex_wake(uaddr, flags, val, val3);
case FUTEX_REQUEUE:
return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
case FUTEX_CMP_REQUEUE:
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
case FUTEX_WAKE_OP:
return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
case FUTEX_LOCK_PI:
return futex_lock_pi(uaddr, flags, val, timeout, 0);
case FUTEX_UNLOCK_PI:
return futex_unlock_pi(uaddr, flags);
case FUTEX_TRYLOCK_PI:
return futex_lock_pi(uaddr, flags, 0, timeout, 1);
case FUTEX_WAIT_REQUEUE_PI:
val3 = FUTEX_BITSET_MATCH_ANY;
return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
uaddr2);
case FUTEX_CMP_REQUEUE_PI:
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
}
return -ENOSYS;
}
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
struct timespec __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
struct timespec ts;
ktime_t t, *tp = NULL;
u32 val2 = 0;
int cmd = op & FUTEX_CMD_MASK;
if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
cmd == FUTEX_WAIT_BITSET ||
cmd == FUTEX_WAIT_REQUEUE_PI)) {
if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
return -EFAULT;
if (!timespec_valid(&ts))
return -EINVAL;
t = timespec_to_ktime(ts);
if (cmd == FUTEX_WAIT)
t = ktime_add_safe(ktime_get(), t);
tp = &t;
}
/*
* requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
* number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
*/
if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
val2 = (u32) (unsigned long) utime;
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
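/*
 * Illustrative sketch (not kernel code): the basic wait/wake pairing this
 * syscall implements, as seen from userspace. "futex_word" is a hypothetical
 * shared int; error and EINTR handling are omitted.
 *
 *	// waiter: sleep only while the word still holds the value we saw
 *	while (__atomic_load_n(&futex_word, __ATOMIC_ACQUIRE) == 0)
 *		syscall(SYS_futex, &futex_word, FUTEX_WAIT, 0, NULL, NULL, 0);
 *
 *	// waker: publish the new value, then wake one waiter
 *	__atomic_store_n(&futex_word, 1, __ATOMIC_RELEASE);
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 */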
static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
/*
* This will fail and we want it. Some arch implementations do
* runtime detection of the futex_atomic_cmpxchg_inatomic()
* functionality. We want to know that before we call in any
* of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on a functional
	 * implementation; the non-functional ones will return
	 * -ENOSYS.
*/
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
futex_cmpxchg_enabled = 1;
#endif
}
static int __init futex_init(void)
{
int i;
futex_detect_cmpxchg();
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
return 0;
}
__initcall(futex_init);
/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/android_pmem.h>
#include <linux/msm_rotator.h>
#include <linux/io.h>
#include <mach/msm_rotator_imem.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/major.h>
#include <linux/regulator/consumer.h>
#include <linux/msm_ion.h>
#include <linux/sync.h>
#include <linux/sw_sync.h>
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#endif
#include <mach/msm_subsystem_map.h>
#include <mach/iommu_domains.h>
#define DRIVER_NAME "msm_rotator"
#define MSM_ROTATOR_BASE (msm_rotator_dev->io_base)
#define MSM_ROTATOR_INTR_ENABLE (MSM_ROTATOR_BASE+0x0020)
#define MSM_ROTATOR_INTR_STATUS (MSM_ROTATOR_BASE+0x0024)
#define MSM_ROTATOR_INTR_CLEAR (MSM_ROTATOR_BASE+0x0028)
#define MSM_ROTATOR_START (MSM_ROTATOR_BASE+0x0030)
#define MSM_ROTATOR_MAX_BURST_SIZE (MSM_ROTATOR_BASE+0x0050)
#define MSM_ROTATOR_HW_VERSION (MSM_ROTATOR_BASE+0x0070)
#define MSM_ROTATOR_SW_RESET (MSM_ROTATOR_BASE+0x0074)
#define MSM_ROTATOR_SRC_SIZE (MSM_ROTATOR_BASE+0x1108)
#define MSM_ROTATOR_SRCP0_ADDR (MSM_ROTATOR_BASE+0x110c)
#define MSM_ROTATOR_SRCP1_ADDR (MSM_ROTATOR_BASE+0x1110)
#define MSM_ROTATOR_SRCP2_ADDR (MSM_ROTATOR_BASE+0x1114)
#define MSM_ROTATOR_SRC_YSTRIDE1 (MSM_ROTATOR_BASE+0x111c)
#define MSM_ROTATOR_SRC_YSTRIDE2 (MSM_ROTATOR_BASE+0x1120)
#define MSM_ROTATOR_SRC_FORMAT (MSM_ROTATOR_BASE+0x1124)
#define MSM_ROTATOR_SRC_UNPACK_PATTERN1 (MSM_ROTATOR_BASE+0x1128)
#define MSM_ROTATOR_SUB_BLOCK_CFG (MSM_ROTATOR_BASE+0x1138)
#define MSM_ROTATOR_OUT_PACK_PATTERN1 (MSM_ROTATOR_BASE+0x1154)
#define MSM_ROTATOR_OUTP0_ADDR (MSM_ROTATOR_BASE+0x1168)
#define MSM_ROTATOR_OUTP1_ADDR (MSM_ROTATOR_BASE+0x116c)
#define MSM_ROTATOR_OUTP2_ADDR (MSM_ROTATOR_BASE+0x1170)
#define MSM_ROTATOR_OUT_YSTRIDE1 (MSM_ROTATOR_BASE+0x1178)
#define MSM_ROTATOR_OUT_YSTRIDE2 (MSM_ROTATOR_BASE+0x117c)
#define MSM_ROTATOR_SRC_XY (MSM_ROTATOR_BASE+0x1200)
#define MSM_ROTATOR_SRC_IMAGE_SIZE (MSM_ROTATOR_BASE+0x1208)
#define MSM_ROTATOR_MAX_ROT 0x07
#define MSM_ROTATOR_MAX_H 0x1fff
#define MSM_ROTATOR_MAX_W 0x1fff
/* from lsb to msb */
#define GET_PACK_PATTERN(a, x, y, z, bit) \
(((a)<<((bit)*3))|((x)<<((bit)*2))|((y)<<(bit))|(z))
#define CLR_G 0x0
#define CLR_B 0x1
#define CLR_R 0x2
#define CLR_ALPHA 0x3
#define CLR_Y CLR_G
#define CLR_CB CLR_B
#define CLR_CR CLR_R
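/*
 * For example (purely arithmetic, derived from the macro above): with
 * bit == 8 each component index occupies one byte and the first argument
 * lands in the most-significant byte, so
 * GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8) expands to
 * (0x3 << 24) | (0x2 << 16) | (0x0 << 8) | 0x1 == 0x03020001.
 */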
#define ROTATIONS_TO_BITMASK(r) ((((r) & MDP_ROT_90) ? 1 : 0) | \
(((r) & MDP_FLIP_LR) ? 2 : 0) | \
(((r) & MDP_FLIP_UD) ? 4 : 0))
#define IMEM_NO_OWNER -1
#define MAX_SESSIONS 16
#define INVALID_SESSION -1
#define VERSION_KEY_MASK 0xFFFFFF00
#define MAX_DOWNSCALE_RATIO 3
#define MAX_COMMIT_QUEUE 4
#define WAIT_ROT_TIMEOUT 1000
#define MAX_TIMELINE_NAME_LEN 16
#define WAIT_FENCE_FIRST_TIMEOUT MSEC_PER_SEC
#define WAIT_FENCE_FINAL_TIMEOUT (10 * MSEC_PER_SEC)
#define ROTATOR_REVISION_V0 0
#define ROTATOR_REVISION_V1 1
#define ROTATOR_REVISION_V2 2
#define ROTATOR_REVISION_NONE 0xffffffff
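/*
 * Note on the two helpers below: BASE_ADDR() computes the expected ("golden")
 * base address of the second tile row from its arguments, while
 * HW_BASE_ADDR() deliberately ignores its parameters and expands in terms of
 * the caller's dst_height/dstp0_ystride locals, mirroring the hardware's
 * address generation. Both are only meaningful inside
 * fast_yuv_invalid_size_checker() further below, where the two results are
 * compared.
 */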
#define BASE_ADDR(height, y_stride) ((height % 64) * y_stride)
#define HW_BASE_ADDR(height, y_stride) (((dstp0_ystride >> 5) << 11) - \
((dst_height & 0x3f) * dstp0_ystride))
uint32_t rotator_hw_revision;
static char rot_iommu_split_domain;
/*
* rotator_hw_revision:
* 0 == 7x30
* 1 == 8x60
* 2 == 8960
*
*/
struct tile_parm {
unsigned int width; /* tile's width */
unsigned int height; /* tile's height */
unsigned int row_tile_w; /* tiles per row's width */
unsigned int row_tile_h; /* tiles per row's height */
};
struct msm_rotator_mem_planes {
unsigned int num_planes;
unsigned int plane_size[4];
unsigned int total_size;
};
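/*
 * checkoffset() evaluates to true when the [offset, offset + size) window
 * does not fit within max_size bytes; the second comparison is written as
 * (offset > max_size - size) so that offset + size cannot overflow.
 */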
#define checkoffset(offset, size, max_size) \
((size) > (max_size) || (offset) > ((max_size) - (size)))
struct msm_rotator_fd_info {
int pid;
int ref_cnt;
struct list_head list;
};
struct rot_sync_info {
u32 initialized;
struct sync_fence *acq_fen;
struct sync_fence *rel_fen;
int rel_fen_fd;
struct sw_sync_timeline *timeline;
int timeline_value;
struct mutex sync_mutex;
atomic_t queue_buf_cnt;
};
struct msm_rotator_session {
struct msm_rotator_img_info img_info;
struct msm_rotator_fd_info fd_info;
int fast_yuv_enable;
int enable_2pass;
u32 mem_hid;
};
struct msm_rotator_commit_info {
struct msm_rotator_data_info data_info;
struct msm_rotator_img_info img_info;
unsigned int format;
unsigned int in_paddr;
unsigned int out_paddr;
unsigned int in_chroma_paddr;
unsigned int out_chroma_paddr;
unsigned int in_chroma2_paddr;
unsigned int out_chroma2_paddr;
struct file *srcp0_file;
struct file *srcp1_file;
struct file *dstp0_file;
struct file *dstp1_file;
struct ion_handle *srcp0_ihdl;
struct ion_handle *srcp1_ihdl;
struct ion_handle *dstp0_ihdl;
struct ion_handle *dstp1_ihdl;
int ps0_need;
int session_index;
struct sync_fence *acq_fen;
int fast_yuv_en;
int enable_2pass;
};
struct msm_rotator_dev {
void __iomem *io_base;
int irq;
struct clk *core_clk;
struct msm_rotator_session *rot_session[MAX_SESSIONS];
struct list_head fd_list;
struct clk *pclk;
int rot_clk_state;
struct regulator *regulator;
struct delayed_work rot_clk_work;
struct clk *imem_clk;
int imem_clk_state;
struct delayed_work imem_clk_work;
struct platform_device *pdev;
struct cdev cdev;
struct device *device;
struct class *class;
dev_t dev_num;
int processing;
int last_session_idx;
struct mutex rotator_lock;
struct mutex imem_lock;
int imem_owner;
wait_queue_head_t wq;
struct ion_client *client;
#ifdef CONFIG_MSM_BUS_SCALING
uint32_t bus_client_handle;
#endif
u32 sec_mapped;
u32 mmu_clk_on;
struct rot_sync_info sync_info[MAX_SESSIONS];
/* non blocking */
struct mutex commit_mutex;
struct mutex commit_wq_mutex;
struct completion commit_comp;
u32 commit_running;
struct work_struct commit_work;
struct msm_rotator_commit_info commit_info[MAX_COMMIT_QUEUE];
atomic_t commit_q_r;
atomic_t commit_q_w;
atomic_t commit_q_cnt;
struct rot_buf_type *y_rot_buf;
struct rot_buf_type *chroma_rot_buf;
struct rot_buf_type *chroma2_rot_buf;
};
#define COMPONENT_5BITS 1
#define COMPONENT_6BITS 2
#define COMPONENT_8BITS 3
static struct msm_rotator_dev *msm_rotator_dev;
#define mrd msm_rotator_dev
static void rot_wait_for_commit_queue(u32 is_all);
enum {
CLK_EN,
CLK_DIS,
CLK_SUSPEND,
};
struct res_mmu_clk {
char *mmu_clk_name;
struct clk *mmu_clk;
};
static struct res_mmu_clk rot_mmu_clks[] = {
{"mdp_iommu_clk"}, {"rot_iommu_clk"},
{"vcodec_iommu0_clk"}, {"vcodec_iommu1_clk"},
{"smmu_iface_clk"}
};
u32 rotator_allocate_2pass_buf(struct rot_buf_type *rot_buf, int s_ndx)
{
ion_phys_addr_t addr, read_addr = 0;
size_t buffer_size;
unsigned long len;
if (!rot_buf) {
pr_err("Rot_buf NULL pointer %s %i", __func__, __LINE__);
return 0;
}
if (rot_buf->write_addr || !IS_ERR_OR_NULL(rot_buf->ihdl))
return 0;
buffer_size = roundup(1920 * 1088, SZ_4K);
if (!IS_ERR_OR_NULL(mrd->client)) {
pr_info("%s:%d ion based allocation\n",
__func__, __LINE__);
rot_buf->ihdl = ion_alloc(mrd->client, buffer_size, SZ_4K,
mrd->rot_session[s_ndx]->mem_hid,
mrd->rot_session[s_ndx]->mem_hid & ION_SECURE);
if (!IS_ERR_OR_NULL(rot_buf->ihdl)) {
if (rot_iommu_split_domain) {
if (ion_map_iommu(mrd->client, rot_buf->ihdl,
ROTATOR_SRC_DOMAIN, GEN_POOL, SZ_4K,
0, &read_addr, &len, 0, 0)) {
pr_err("ion_map_iommu() read failed\n");
return -ENOMEM;
}
if (mrd->rot_session[s_ndx]->mem_hid &
ION_SECURE) {
if (ion_phys(mrd->client, rot_buf->ihdl,
&addr, (size_t *)&len)) {
pr_err(
"%s:%d: ion_phys map failed\n",
__func__, __LINE__);
return -ENOMEM;
}
} else {
if (ion_map_iommu(mrd->client,
rot_buf->ihdl, ROTATOR_DST_DOMAIN,
GEN_POOL, SZ_4K, 0, &addr, &len,
0, 0)) {
pr_err("ion_map_iommu() failed\n");
return -ENOMEM;
}
}
} else {
if (ion_map_iommu(mrd->client, rot_buf->ihdl,
ROTATOR_SRC_DOMAIN, GEN_POOL, SZ_4K,
0, &addr, &len, 0, 0)) {
pr_err("ion_map_iommu() write failed\n");
return -ENOMEM;
}
}
} else {
pr_err("%s:%d: ion_alloc failed\n", __func__,
__LINE__);
return -ENOMEM;
}
} else {
addr = allocate_contiguous_memory_nomap(buffer_size,
mrd->rot_session[s_ndx]->mem_hid, 4);
}
if (addr) {
pr_info("allocating %d bytes at write=%x, read=%x for 2-pass\n",
buffer_size, (u32) addr, (u32) read_addr);
rot_buf->write_addr = addr;
if (read_addr)
rot_buf->read_addr = read_addr;
else
rot_buf->read_addr = rot_buf->write_addr;
return 0;
} else {
pr_err("%s cannot allocate memory for rotator 2-pass!\n",
__func__);
return -ENOMEM;
}
}
void rotator_free_2pass_buf(struct rot_buf_type *rot_buf, int s_ndx)
{
if (!rot_buf) {
pr_err("Rot_buf NULL pointer %s %i", __func__, __LINE__);
return;
}
if (!rot_buf->write_addr)
return;
if (!IS_ERR_OR_NULL(mrd->client)) {
if (!IS_ERR_OR_NULL(rot_buf->ihdl)) {
if (rot_iommu_split_domain) {
if (!(mrd->rot_session[s_ndx]->mem_hid &
ION_SECURE))
ion_unmap_iommu(mrd->client,
rot_buf->ihdl, ROTATOR_DST_DOMAIN,
GEN_POOL);
ion_unmap_iommu(mrd->client, rot_buf->ihdl,
ROTATOR_SRC_DOMAIN, GEN_POOL);
} else {
ion_unmap_iommu(mrd->client, rot_buf->ihdl,
ROTATOR_SRC_DOMAIN, GEN_POOL);
}
ion_free(mrd->client, rot_buf->ihdl);
rot_buf->ihdl = NULL;
pr_info("%s:%d Free rotator 2pass memory",
__func__, __LINE__);
}
} else {
if (rot_buf->write_addr) {
free_contiguous_memory_by_paddr(rot_buf->write_addr);
pr_debug("%s:%d Free rotator 2pass pmem\n", __func__,
__LINE__);
}
}
rot_buf->write_addr = 0;
rot_buf->read_addr = 0;
}
int msm_rotator_iommu_map_buf(int mem_id, int domain,
unsigned long *start, unsigned long *len,
struct ion_handle **pihdl, unsigned int secure)
{
if (!msm_rotator_dev->client)
return -EINVAL;
*pihdl = ion_import_dma_buf(msm_rotator_dev->client, mem_id);
if (IS_ERR_OR_NULL(*pihdl)) {
pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*pihdl);
}
pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl, mem_id);
if (rot_iommu_split_domain) {
if (secure) {
if (ion_phys(msm_rotator_dev->client,
*pihdl, start, (unsigned *)len)) {
pr_err("%s:%d: ion_phys map failed\n",
__func__, __LINE__);
return -ENOMEM;
}
} else {
if (ion_map_iommu(msm_rotator_dev->client,
*pihdl, domain, GEN_POOL,
SZ_4K, 0, start, len, 0,
ION_IOMMU_UNMAP_DELAYED)) {
pr_err("ion_map_iommu() failed\n");
return -EINVAL;
}
}
} else {
if (ion_map_iommu(msm_rotator_dev->client,
*pihdl, ROTATOR_SRC_DOMAIN, GEN_POOL,
SZ_4K, 0, start, len, 0, ION_IOMMU_UNMAP_DELAYED)) {
pr_err("ion_map_iommu() failed\n");
return -EINVAL;
}
}
pr_debug("%s(): mem_id %d, start 0x%lx, len 0x%lx\n",
__func__, mem_id, *start, *len);
return 0;
}
int msm_rotator_imem_allocate(int requestor)
{
int rc = 0;
#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
switch (requestor) {
case ROTATOR_REQUEST:
if (mutex_trylock(&msm_rotator_dev->imem_lock)) {
msm_rotator_dev->imem_owner = ROTATOR_REQUEST;
rc = 1;
} else
rc = 0;
break;
case JPEG_REQUEST:
mutex_lock(&msm_rotator_dev->imem_lock);
msm_rotator_dev->imem_owner = JPEG_REQUEST;
rc = 1;
break;
default:
rc = 0;
}
#else
if (requestor == JPEG_REQUEST)
rc = 1;
#endif
if (rc == 1) {
cancel_delayed_work_sync(&msm_rotator_dev->imem_clk_work);
if (msm_rotator_dev->imem_clk_state != CLK_EN
&& msm_rotator_dev->imem_clk) {
clk_prepare_enable(msm_rotator_dev->imem_clk);
msm_rotator_dev->imem_clk_state = CLK_EN;
}
}
return rc;
}
EXPORT_SYMBOL(msm_rotator_imem_allocate);
void msm_rotator_imem_free(int requestor)
{
#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
if (msm_rotator_dev->imem_owner == requestor) {
schedule_delayed_work(&msm_rotator_dev->imem_clk_work, HZ);
mutex_unlock(&msm_rotator_dev->imem_lock);
}
#else
if (requestor == JPEG_REQUEST)
schedule_delayed_work(&msm_rotator_dev->imem_clk_work, HZ);
#endif
}
EXPORT_SYMBOL(msm_rotator_imem_free);
static void msm_rotator_imem_clk_work_f(struct work_struct *work)
{
#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
if (mutex_trylock(&msm_rotator_dev->imem_lock)) {
if (msm_rotator_dev->imem_clk_state == CLK_EN
&& msm_rotator_dev->imem_clk) {
clk_disable_unprepare(msm_rotator_dev->imem_clk);
msm_rotator_dev->imem_clk_state = CLK_DIS;
} else if (msm_rotator_dev->imem_clk_state == CLK_SUSPEND)
msm_rotator_dev->imem_clk_state = CLK_DIS;
mutex_unlock(&msm_rotator_dev->imem_lock);
}
#endif
}
/* enable clocks needed by rotator block */
static void enable_rot_clks(void)
{
if (msm_rotator_dev->regulator)
regulator_enable(msm_rotator_dev->regulator);
if (msm_rotator_dev->core_clk != NULL)
clk_prepare_enable(msm_rotator_dev->core_clk);
if (msm_rotator_dev->pclk != NULL)
clk_prepare_enable(msm_rotator_dev->pclk);
}
/* disable clocks needed by rotator block */
static void disable_rot_clks(void)
{
if (msm_rotator_dev->core_clk != NULL)
clk_disable_unprepare(msm_rotator_dev->core_clk);
if (msm_rotator_dev->pclk != NULL)
clk_disable_unprepare(msm_rotator_dev->pclk);
if (msm_rotator_dev->regulator)
regulator_disable(msm_rotator_dev->regulator);
}
static void msm_rotator_rot_clk_work_f(struct work_struct *work)
{
if (mutex_trylock(&msm_rotator_dev->rotator_lock)) {
if (msm_rotator_dev->rot_clk_state == CLK_EN) {
disable_rot_clks();
msm_rotator_dev->rot_clk_state = CLK_DIS;
} else if (msm_rotator_dev->rot_clk_state == CLK_SUSPEND)
msm_rotator_dev->rot_clk_state = CLK_DIS;
mutex_unlock(&msm_rotator_dev->rotator_lock);
}
}
static irqreturn_t msm_rotator_isr(int irq, void *dev_id)
{
if (msm_rotator_dev->processing) {
msm_rotator_dev->processing = 0;
wake_up(&msm_rotator_dev->wq);
} else
printk(KERN_WARNING "%s: unexpected interrupt\n", DRIVER_NAME);
return IRQ_HANDLED;
}
static void msm_rotator_signal_timeline(u32 session_index)
{
struct rot_sync_info *sync_info;
sync_info = &msm_rotator_dev->sync_info[session_index];
if ((!sync_info->timeline) || (!sync_info->initialized))
return;
mutex_lock(&sync_info->sync_mutex);
sw_sync_timeline_inc(sync_info->timeline, 1);
sync_info->timeline_value++;
mutex_unlock(&sync_info->sync_mutex);
}
static void msm_rotator_signal_timeline_done(u32 session_index)
{
struct rot_sync_info *sync_info;
sync_info = &msm_rotator_dev->sync_info[session_index];
if ((sync_info->timeline == NULL) ||
(sync_info->initialized == false))
return;
mutex_lock(&sync_info->sync_mutex);
sw_sync_timeline_inc(sync_info->timeline, 1);
sync_info->timeline_value++;
if (atomic_read(&sync_info->queue_buf_cnt) <= 0)
pr_err("%s queue_buf_cnt=%d", __func__,
atomic_read(&sync_info->queue_buf_cnt));
else
atomic_dec(&sync_info->queue_buf_cnt);
mutex_unlock(&sync_info->sync_mutex);
}
static void msm_rotator_release_acq_fence(u32 session_index)
{
struct rot_sync_info *sync_info;
sync_info = &msm_rotator_dev->sync_info[session_index];
if ((!sync_info->timeline) || (!sync_info->initialized))
return;
mutex_lock(&sync_info->sync_mutex);
sync_info->acq_fen = NULL;
mutex_unlock(&sync_info->sync_mutex);
}
static void msm_rotator_release_all_timeline(void)
{
int i;
struct rot_sync_info *sync_info;
for (i = 0; i < MAX_SESSIONS; i++) {
sync_info = &msm_rotator_dev->sync_info[i];
if (sync_info->initialized) {
msm_rotator_signal_timeline(i);
msm_rotator_release_acq_fence(i);
}
}
}
static void msm_rotator_wait_for_fence(struct sync_fence *acq_fen)
{
int ret;
if (acq_fen) {
ret = sync_fence_wait(acq_fen,
WAIT_FENCE_FIRST_TIMEOUT);
if (ret == -ETIME) {
pr_warn("%s: timeout, wait %ld more ms\n",
__func__, WAIT_FENCE_FINAL_TIMEOUT);
ret = sync_fence_wait(acq_fen,
WAIT_FENCE_FINAL_TIMEOUT);
}
if (ret < 0) {
pr_err("%s: sync_fence_wait failed! ret = %x\n",
__func__, ret);
}
sync_fence_put(acq_fen);
}
}
static int msm_rotator_buf_sync(unsigned long arg)
{
struct msm_rotator_buf_sync buf_sync;
int ret = 0;
struct sync_fence *fence = NULL;
struct rot_sync_info *sync_info;
struct sync_pt *rel_sync_pt;
struct sync_fence *rel_fence;
int rel_fen_fd;
u32 s;
if (copy_from_user(&buf_sync, (void __user *)arg, sizeof(buf_sync)))
return -EFAULT;
rot_wait_for_commit_queue(false);
for (s = 0; s < MAX_SESSIONS; s++)
if ((msm_rotator_dev->rot_session[s] != NULL) &&
(buf_sync.session_id ==
(unsigned int)msm_rotator_dev->rot_session[s]
))
break;
if (s == MAX_SESSIONS) {
pr_err("%s invalid session id %d", __func__,
buf_sync.session_id);
return -EINVAL;
}
sync_info = &msm_rotator_dev->sync_info[s];
if (sync_info->acq_fen)
pr_err("%s previous acq_fen will be overwritten", __func__);
if ((sync_info->timeline == NULL) ||
(sync_info->initialized == false))
return -EINVAL;
mutex_lock(&sync_info->sync_mutex);
if (buf_sync.acq_fen_fd >= 0)
fence = sync_fence_fdget(buf_sync.acq_fen_fd);
sync_info->acq_fen = fence;
if (sync_info->acq_fen &&
(buf_sync.flags & MDP_BUF_SYNC_FLAG_WAIT)) {
msm_rotator_wait_for_fence(sync_info->acq_fen);
sync_info->acq_fen = NULL;
}
rel_sync_pt = sw_sync_pt_create(sync_info->timeline,
sync_info->timeline_value +
atomic_read(&sync_info->queue_buf_cnt) + 1);
if (rel_sync_pt == NULL) {
pr_err("%s: cannot create sync point", __func__);
ret = -ENOMEM;
goto buf_sync_err_1;
}
/* create fence */
rel_fence = sync_fence_create("msm_rotator-fence",
rel_sync_pt);
if (rel_fence == NULL) {
sync_pt_free(rel_sync_pt);
pr_err("%s: cannot create fence", __func__);
ret = -ENOMEM;
goto buf_sync_err_1;
}
/* create fd */
rel_fen_fd = get_unused_fd_flags(0);
if (rel_fen_fd < 0) {
pr_err("%s: get_unused_fd_flags failed", __func__);
ret = -EIO;
goto buf_sync_err_2;
}
sync_fence_install(rel_fence, rel_fen_fd);
buf_sync.rel_fen_fd = rel_fen_fd;
sync_info->rel_fen = rel_fence;
sync_info->rel_fen_fd = rel_fen_fd;
ret = copy_to_user((void __user *)arg, &buf_sync, sizeof(buf_sync));
mutex_unlock(&sync_info->sync_mutex);
return ret;
buf_sync_err_2:
sync_fence_put(rel_fence);
buf_sync_err_1:
if (sync_info->acq_fen)
sync_fence_put(sync_info->acq_fen);
sync_info->acq_fen = NULL;
mutex_unlock(&sync_info->sync_mutex);
return ret;
}
static unsigned int tile_size(unsigned int src_width,
unsigned int src_height,
const struct tile_parm *tp)
{
unsigned int tile_w, tile_h;
unsigned int row_num_w, row_num_h;
tile_w = tp->width * tp->row_tile_w;
tile_h = tp->height * tp->row_tile_h;
row_num_w = (src_width + tile_w - 1) / tile_w;
row_num_h = (src_height + tile_h - 1) / tile_h;
return ((row_num_w * row_num_h * tile_w * tile_h) + 8191) & ~8191;
}
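/*
 * Worked example (illustrative): with the {64, 32, 2, 1} tile parameters used
 * below, tile_w = 128 and tile_h = 32, so a 1920x1080 luma plane needs
 * row_num_w = 15 and row_num_h = 34 tile groups, i.e.
 * 15 * 34 * 128 * 32 = 2088960 bytes, which is already a multiple of 8192.
 */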
static int get_bpp(int format)
{
switch (format) {
case MDP_RGB_565:
case MDP_BGR_565:
return 2;
case MDP_XRGB_8888:
case MDP_ARGB_8888:
case MDP_RGBA_8888:
case MDP_BGRA_8888:
case MDP_RGBX_8888:
return 4;
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CB_CR_H2V2:
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CRCB_H2V2_TILE:
case MDP_Y_CBCR_H2V2_TILE:
return 1;
case MDP_RGB_888:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
return 3;
case MDP_YCRYCB_H2V1:
return 2;/* YCrYCb interleave */
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
return 1;
default:
return -1;
}
}
static int msm_rotator_get_plane_sizes(uint32_t format, uint32_t w, uint32_t h,
struct msm_rotator_mem_planes *p)
{
/*
	 * Each row of a Samsung tile consists of two tiles in height
	 * and two tiles in width, which means width should align to
	 * 64 x 2 bytes and height should align to 32 x 2 bytes.
	 * The video decoder generates two tiles in width and one tile
	 * in height, which ends up with height aligned to 32 x 1 bytes.
*/
const struct tile_parm tile = {64, 32, 2, 1};
int i;
if (p == NULL)
return -EINVAL;
if ((w > MSM_ROTATOR_MAX_W) || (h > MSM_ROTATOR_MAX_H))
return -ERANGE;
memset(p, 0, sizeof(*p));
switch (format) {
case MDP_XRGB_8888:
case MDP_ARGB_8888:
case MDP_RGBA_8888:
case MDP_BGRA_8888:
case MDP_RGBX_8888:
case MDP_RGB_888:
case MDP_RGB_565:
case MDP_BGR_565:
case MDP_YCRYCB_H2V1:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
p->num_planes = 1;
p->plane_size[0] = w * h * get_bpp(format);
break;
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H1V2:
case MDP_Y_CBCR_H1V2:
p->num_planes = 2;
p->plane_size[0] = w * h;
p->plane_size[1] = w * h;
break;
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H2V2:
p->num_planes = 2;
p->plane_size[0] = w * h;
p->plane_size[1] = w * h / 2;
break;
case MDP_Y_CRCB_H2V2_TILE:
case MDP_Y_CBCR_H2V2_TILE:
p->num_planes = 2;
p->plane_size[0] = tile_size(w, h, &tile);
p->plane_size[1] = tile_size(w, h/2, &tile);
break;
case MDP_Y_CB_CR_H2V2:
case MDP_Y_CR_CB_H2V2:
p->num_planes = 3;
p->plane_size[0] = w * h;
p->plane_size[1] = (w / 2) * (h / 2);
p->plane_size[2] = (w / 2) * (h / 2);
break;
case MDP_Y_CR_CB_GH2V2:
p->num_planes = 3;
p->plane_size[0] = ALIGN(w, 16) * h;
p->plane_size[1] = ALIGN(w / 2, 16) * (h / 2);
p->plane_size[2] = ALIGN(w / 2, 16) * (h / 2);
break;
default:
return -EINVAL;
}
for (i = 0; i < p->num_planes; i++)
p->total_size += p->plane_size[i];
return 0;
}
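/*
* Worked example (illustrative numbers only): for MDP_Y_CR_CB_GH2V2 at a
* hypothetical 100x100, msm_rotator_get_plane_sizes() yields
* plane_size[0] = ALIGN(100, 16) * 100 = 11200 and
* plane_size[1] = plane_size[2] = ALIGN(50, 16) * 50 = 3200,
* for a total_size of 17600 bytes.
*/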
/* Check for invalid destination image sizes on FAST YUV for YUV420PP (NV12):
* covers the HW issue for rotation 90 + U/D flip with/without L/R flip
* (rotation 90 + U/D + L/R flip is the rotation 270 degree option) and the
* pix_rot block issue when the tile line size is 4.
*
* The rotator pipeline is:
* Fetch input image: W x H
* Downscale: W` x H` = W/ScaleHor(2, 4 or 8) x H/ScaleVert(2, 4 or 8)
* Rotated output: W`` x H`` = (W` x H`) or (H` x W`), depending on the
* "Rotation 90 degree" option
*
* Pack: W`` x H``
*
* The rotator source ROI image width restriction is applied to W x H
* (case a, the image resolution before downscaling).
*
* The packer source image width/height restrictions are applied to
* W`` x H`` (case c, the image resolution after rotation).
*
* The supertile (64 x 8) and YUV (2 x 2) alignment restrictions should be
* applied to W x H (case a). The input image should be at least 2 x 2.
*
* "Support If packer source image height <= 256, multiple of 8": this
* restriction should be applied to the rotated image (W`` x H``)
*/
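/*
* Worked example (illustrative numbers only): for a planar 4:2:0 source
* (hw_limit = 512 below), a hypothetical src_width of 520 fails the first
* check, since 520 > 512 and 520 % 256 == 8, so the caller would fall back
* to the non-fast-YUV path.
*/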
uint32_t fast_yuv_invalid_size_checker(unsigned char rot_mode,
uint32_t src_width,
uint32_t dst_width,
uint32_t src_height,
uint32_t dst_height,
uint32_t dstp0_ystride,
uint32_t is_planar420)
{
uint32_t hw_limit;
hw_limit = is_planar420 ? 512 : 256;
/* check image constraints for the missing EOT event from the pix_rot block */
if ((src_width > hw_limit) && ((src_width % (hw_limit / 2)) == 8))
return -EINVAL;
if (rot_mode & MDP_ROT_90) {
if ((src_height % 128) == 8)
return -EINVAL;
/* if rotating 90 degrees on fast YUV,
* the rotator image input width has to be a multiple of 8 and
* the rotator image input height has to be a multiple of 8
*/
if (((dst_width % 8) != 0) || ((dst_height % 8) != 0))
return -EINVAL;
if ((rot_mode & MDP_FLIP_UD) ||
(rot_mode & (MDP_FLIP_UD | MDP_FLIP_LR))) {
/* check the image constraints for the
* wrong-address-generation HW issue on the Y plane
*/
if (((dst_height % 64) != 0) &&
((dst_height / 64) >= 4)) {
/* compare golden logic for second
* tile base address generation in row
* with actual HW implementation
*/
if (BASE_ADDR(dst_height, dstp0_ystride) !=
HW_BASE_ADDR(dst_height, dstp0_ystride))
return -EINVAL;
}
if (is_planar420) {
dst_width = dst_width / 2;
dstp0_ystride = dstp0_ystride / 2;
}
dst_height = dst_height / 2;
/* check the image constraints for the
* wrong-address-generation HW issue on the
* U/V (planar) or UV (pseudo-planar) plane
*/
if (((dst_height % 64) != 0) && ((dst_height / 64) >=
(hw_limit / 128))) {
/* compare golden logic for
* second tile base address
* generation in row with
* actual HW implementation
*/
if (BASE_ADDR(dst_height, dstp0_ystride) !=
HW_BASE_ADDR(dst_height, dstp0_ystride))
return -EINVAL;
}
}
} else {
/* if NOT applying a 90 degree rotation on fast YUV,
* the rotator image input width has to be a multiple of 8 and
* the rotator image input height has to be a multiple of 8
*/
if (((dst_width % 8) != 0) || ((dst_height % 8) != 0))
return -EINVAL;
}
return 0;
}
static int msm_rotator_ycxcx_h2v1(struct msm_rotator_img_info *info,
unsigned int in_paddr,
unsigned int out_paddr,
unsigned int use_imem,
int new_session,
unsigned int in_chroma_paddr,
unsigned int out_chroma_paddr)
{
int bpp;
uint32_t dst_format;
switch (info->src.format) {
case MDP_Y_CRCB_H2V1:
if (info->rotations & MDP_ROT_90)
dst_format = MDP_Y_CRCB_H1V2;
else
dst_format = info->src.format;
break;
case MDP_Y_CBCR_H2V1:
if (info->rotations & MDP_ROT_90)
dst_format = MDP_Y_CBCR_H1V2;
else
dst_format = info->src.format;
break;
default:
return -EINVAL;
}
if (info->dst.format != dst_format)
return -EINVAL;
bpp = get_bpp(info->src.format);
if (bpp < 0)
return -ENOTTY;
iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
iowrite32(in_chroma_paddr, MSM_ROTATOR_SRCP1_ADDR);
iowrite32(out_paddr +
((info->dst_y * info->dst.width) + info->dst_x),
MSM_ROTATOR_OUTP0_ADDR);
iowrite32(out_chroma_paddr +
((info->dst_y * info->dst.width) + info->dst_x),
MSM_ROTATOR_OUTP1_ADDR);
if (new_session) {
iowrite32(info->src.width |
info->src.width << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
if (info->rotations & MDP_ROT_90)
iowrite32(info->dst.width |
info->dst.width*2 << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
else
iowrite32(info->dst.width |
info->dst.width << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
if (info->src.format == MDP_Y_CBCR_H2V1) {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
} else {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
}
iowrite32((1 << 18) | /* chroma sampling 1=H2V1 */
(ROTATIONS_TO_BITMASK(info->rotations) << 9) |
1 << 8 | /* ROT_EN */
info->downscale_ratio << 2 | /* downscale v ratio */
info->downscale_ratio, /* downscale h ratio */
MSM_ROTATOR_SUB_BLOCK_CFG);
iowrite32(0 << 29 | /* frame format 0 = linear */
(use_imem ? 0 : 1) << 22 | /* tile size */
2 << 19 | /* fetch planes 2 = pseudo */
0 << 18 | /* unpack align */
1 << 17 | /* unpack tight */
1 << 13 | /* unpack count 0=1 component */
(bpp-1) << 9 | /* src Bpp 0=1 byte ... */
0 << 8 | /* has alpha */
0 << 6 | /* alpha bits 3=8bits */
3 << 4 | /* R/Cr bits 1=5 2=6 3=8 */
3 << 2 | /* B/Cb bits 1=5 2=6 3=8 */
3 << 0, /* G/Y bits 1=5 2=6 3=8 */
MSM_ROTATOR_SRC_FORMAT);
}
return 0;
}
static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info,
unsigned int in_paddr,
unsigned int out_paddr,
unsigned int use_imem,
int new_session,
unsigned int in_chroma_paddr,
unsigned int out_chroma_paddr,
unsigned int in_chroma2_paddr,
unsigned int out_chroma2_paddr,
int fast_yuv_en)
{
uint32_t dst_format;
int is_tile = 0;
switch (info->src.format) {
case MDP_Y_CRCB_H2V2_TILE:
is_tile = 1;
dst_format = MDP_Y_CRCB_H2V2;
break;
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
if (fast_yuv_en) {
dst_format = info->src.format;
break;
}
case MDP_Y_CRCB_H2V2:
dst_format = MDP_Y_CRCB_H2V2;
break;
case MDP_Y_CB_CR_H2V2:
if (fast_yuv_en) {
dst_format = info->src.format;
break;
}
dst_format = MDP_Y_CBCR_H2V2;
break;
case MDP_Y_CBCR_H2V2_TILE:
is_tile = 1;
case MDP_Y_CBCR_H2V2:
dst_format = MDP_Y_CBCR_H2V2;
break;
default:
return -EINVAL;
}
if (info->dst.format != dst_format)
return -EINVAL;
/* rotator expects YCbCr for planar input format */
if ((info->src.format == MDP_Y_CR_CB_H2V2 ||
info->src.format == MDP_Y_CR_CB_GH2V2) &&
rotator_hw_revision < ROTATOR_REVISION_V2)
swap(in_chroma_paddr, in_chroma2_paddr);
iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
iowrite32(in_chroma_paddr, MSM_ROTATOR_SRCP1_ADDR);
iowrite32(in_chroma2_paddr, MSM_ROTATOR_SRCP2_ADDR);
iowrite32(out_paddr +
((info->dst_y * info->dst.width) + info->dst_x),
MSM_ROTATOR_OUTP0_ADDR);
iowrite32(out_chroma_paddr +
(((info->dst_y * info->dst.width)/2) + info->dst_x),
MSM_ROTATOR_OUTP1_ADDR);
if (out_chroma2_paddr)
iowrite32(out_chroma2_paddr +
(((info->dst_y * info->dst.width)/2) + info->dst_x),
MSM_ROTATOR_OUTP2_ADDR);
if (new_session) {
if (in_chroma2_paddr) {
if (info->src.format == MDP_Y_CR_CB_GH2V2) {
iowrite32(ALIGN(info->src.width, 16) |
ALIGN((info->src.width / 2), 16) << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
iowrite32(ALIGN((info->src.width / 2), 16),
MSM_ROTATOR_SRC_YSTRIDE2);
} else {
iowrite32(info->src.width |
(info->src.width / 2) << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
iowrite32((info->src.width / 2),
MSM_ROTATOR_SRC_YSTRIDE2);
}
} else {
iowrite32(info->src.width |
info->src.width << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
}
if (out_chroma2_paddr) {
if (info->dst.format == MDP_Y_CR_CB_GH2V2) {
iowrite32(ALIGN(info->dst.width, 16) |
ALIGN((info->dst.width / 2), 16) << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
iowrite32(ALIGN((info->dst.width / 2), 16),
MSM_ROTATOR_OUT_YSTRIDE2);
} else {
iowrite32(info->dst.width |
info->dst.width/2 << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
iowrite32(info->dst.width/2,
MSM_ROTATOR_OUT_YSTRIDE2);
}
} else {
iowrite32(info->dst.width |
info->dst.width << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
}
if (dst_format == MDP_Y_CBCR_H2V2 ||
dst_format == MDP_Y_CB_CR_H2V2) {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
} else {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
}
iowrite32((3 << 18) | /* chroma sampling 3=4:2:0 */
(ROTATIONS_TO_BITMASK(info->rotations) << 9) |
1 << 8 | /* ROT_EN */
fast_yuv_en << 4 | /*fast YUV*/
info->downscale_ratio << 2 | /* downscale v ratio */
info->downscale_ratio, /* downscale h ratio */
MSM_ROTATOR_SUB_BLOCK_CFG);
iowrite32((is_tile ? 2 : 0) << 29 | /* frame format */
(use_imem ? 0 : 1) << 22 | /* tile size */
(in_chroma2_paddr ? 1 : 2) << 19 | /* fetch planes */
0 << 18 | /* unpack align */
1 << 17 | /* unpack tight */
1 << 13 | /* unpack count 0=1 component */
0 << 9 | /* src Bpp 0=1 byte ... */
0 << 8 | /* has alpha */
0 << 6 | /* alpha bits 3=8bits */
3 << 4 | /* R/Cr bits 1=5 2=6 3=8 */
3 << 2 | /* B/Cb bits 1=5 2=6 3=8 */
3 << 0, /* G/Y bits 1=5 2=6 3=8 */
MSM_ROTATOR_SRC_FORMAT);
}
return 0;
}
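/*
* Two-pass path used when fast YUV, downscaling and a 90 degree rotation
* are all requested: pass 1 only downscales into the per-session
* intermediate buffers (y_rot_buf/chroma_rot_buf/chroma2_rot_buf) with no
* rotation, the driver waits for that pass to finish, and pass 2 then
* reads the intermediate buffers back and rotates into the caller's
* destination with no further downscaling.
*/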
static int msm_rotator_ycxcx_h2v2_2pass(struct msm_rotator_img_info *info,
unsigned int in_paddr,
unsigned int out_paddr,
unsigned int use_imem,
int new_session,
unsigned int in_chroma_paddr,
unsigned int out_chroma_paddr,
unsigned int in_chroma2_paddr,
unsigned int out_chroma2_paddr,
int fast_yuv_en,
int enable_2pass,
int session_index)
{
uint32_t pass2_src_format, pass1_dst_format, dst_format;
int is_tile = 0, post_pass1_buf_is_planar = 0;
unsigned int status;
int post_pass1_ystride = info->src_rect.w >> info->downscale_ratio;
int post_pass1_height = info->src_rect.h >> info->downscale_ratio;
/* DST format = SRC format for non-tiled SRC formats
* when fast YUV is enabled. For TILED formats,
* DST format of MDP_Y_CRCB_H2V2_TILE = MDP_Y_CRCB_H2V2
* DST format of MDP_Y_CBCR_H2V2_TILE = MDP_Y_CBCR_H2V2
*/
switch (info->src.format) {
case MDP_Y_CRCB_H2V2_TILE:
is_tile = 1;
dst_format = MDP_Y_CRCB_H2V2;
pass1_dst_format = MDP_Y_CRCB_H2V2;
pass2_src_format = pass1_dst_format;
break;
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CB_CR_H2V2:
post_pass1_buf_is_planar = 1;
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
dst_format = info->src.format;
pass1_dst_format = info->src.format;
pass2_src_format = pass1_dst_format;
break;
case MDP_Y_CBCR_H2V2_TILE:
is_tile = 1;
dst_format = MDP_Y_CBCR_H2V2;
pass1_dst_format = info->src.format;
pass2_src_format = pass1_dst_format;
break;
default:
return -EINVAL;
}
if (info->dst.format != dst_format)
return -EINVAL;
/* Beginning of Pass-1 */
/* rotator expects YCbCr for planar input format */
if ((info->src.format == MDP_Y_CR_CB_H2V2 ||
info->src.format == MDP_Y_CR_CB_GH2V2) &&
rotator_hw_revision < ROTATOR_REVISION_V2)
swap(in_chroma_paddr, in_chroma2_paddr);
iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
iowrite32(in_chroma_paddr, MSM_ROTATOR_SRCP1_ADDR);
iowrite32(in_chroma2_paddr, MSM_ROTATOR_SRCP2_ADDR);
if (new_session) {
if (in_chroma2_paddr) {
if (info->src.format == MDP_Y_CR_CB_GH2V2) {
iowrite32(ALIGN(info->src.width, 16) |
ALIGN((info->src.width / 2), 16) << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
iowrite32(ALIGN((info->src.width / 2), 16),
MSM_ROTATOR_SRC_YSTRIDE2);
} else {
iowrite32(info->src.width |
(info->src.width / 2) << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
iowrite32((info->src.width / 2),
MSM_ROTATOR_SRC_YSTRIDE2);
}
} else {
iowrite32(info->src.width |
info->src.width << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
}
}
pr_debug("src_rect.w=%i src_rect.h=%i src_rect.x=%i src_rect.y=%i",
info->src_rect.w, info->src_rect.h, info->src_rect.x,
info->src_rect.y);
pr_debug("src.width=%i src.height=%i src_format=%i",
info->src.width, info->src.height, info->src.format);
pr_debug("dst_width=%i dst_height=%i dst.x=%i dst.y=%i",
info->dst.width, info->dst.height, info->dst_x, info->dst_y);
pr_debug("post_pass1_ystride=%i post_pass1_height=%i downscale=%i",
post_pass1_ystride, post_pass1_height, info->downscale_ratio);
rotator_allocate_2pass_buf(mrd->y_rot_buf, session_index);
rotator_allocate_2pass_buf(mrd->chroma_rot_buf, session_index);
if (post_pass1_buf_is_planar)
rotator_allocate_2pass_buf(mrd->chroma2_rot_buf, session_index);
iowrite32(mrd->y_rot_buf->write_addr, MSM_ROTATOR_OUTP0_ADDR);
iowrite32(mrd->chroma_rot_buf->write_addr, MSM_ROTATOR_OUTP1_ADDR);
if (post_pass1_buf_is_planar)
iowrite32(mrd->chroma2_rot_buf->write_addr,
MSM_ROTATOR_OUTP2_ADDR);
if (post_pass1_buf_is_planar) {
if (pass1_dst_format == MDP_Y_CR_CB_GH2V2) {
iowrite32(ALIGN(post_pass1_ystride, 16) |
ALIGN((post_pass1_ystride / 2), 16) << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
iowrite32(ALIGN((post_pass1_ystride / 2), 16),
MSM_ROTATOR_OUT_YSTRIDE2);
} else {
iowrite32(post_pass1_ystride |
post_pass1_ystride / 2 << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
iowrite32(post_pass1_ystride / 2,
MSM_ROTATOR_OUT_YSTRIDE2);
}
} else {
iowrite32(post_pass1_ystride |
post_pass1_ystride << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
}
if (pass1_dst_format == MDP_Y_CBCR_H2V2 ||
pass1_dst_format == MDP_Y_CB_CR_H2V2) {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
} else {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
}
iowrite32((3 << 18) | /* chroma sampling 3=4:2:0 */
0 << 9 | /* Pass-1 No Rotation */
1 << 8 | /* ROT_EN */
fast_yuv_en << 4 | /*fast YUV*/
info->downscale_ratio << 2 | /* downscale v ratio */
info->downscale_ratio, /* downscale h ratio */
MSM_ROTATOR_SUB_BLOCK_CFG);
iowrite32((is_tile ? 2 : 0) << 29 | /* frame format */
(use_imem ? 0 : 1) << 22 | /* tile size */
(in_chroma2_paddr ? 1 : 2) << 19 | /* fetch planes */
0 << 18 | /* unpack align */
1 << 17 | /* unpack tight */
1 << 13 | /* unpack count 0=1 component */
0 << 9 | /* src Bpp 0=1 byte ... */
0 << 8 | /* has alpha */
0 << 6 | /* alpha bits 3=8bits */
3 << 4 | /* R/Cr bits 1=5 2=6 3=8 */
3 << 2 | /* B/Cb bits 1=5 2=6 3=8 */
3 << 0, /* G/Y bits 1=5 2=6 3=8 */
MSM_ROTATOR_SRC_FORMAT);
iowrite32(3, MSM_ROTATOR_INTR_ENABLE);
msm_rotator_dev->processing = 1;
iowrite32(0x1, MSM_ROTATOR_START);
/* End of Pass-1 */
wait_event(msm_rotator_dev->wq,
(msm_rotator_dev->processing == 0));
/* Beginning of Pass-2 */
status = (unsigned char)ioread32(MSM_ROTATOR_INTR_STATUS);
if ((status & 0x03) != 0x01) {
pr_err("%s(): AXI Bus Error, issuing SW_RESET\n",
__func__);
iowrite32(0x1, MSM_ROTATOR_SW_RESET);
}
iowrite32(0, MSM_ROTATOR_INTR_ENABLE);
iowrite32(3, MSM_ROTATOR_INTR_CLEAR);
if (use_imem)
iowrite32(0x42, MSM_ROTATOR_MAX_BURST_SIZE);
iowrite32(((post_pass1_height & 0x1fff)
<< 16) |
(post_pass1_ystride & 0x1fff),
MSM_ROTATOR_SRC_SIZE);
iowrite32(0 << 16 | 0,
MSM_ROTATOR_SRC_XY);
iowrite32(((post_pass1_height & 0x1fff)
<< 16) |
(post_pass1_ystride & 0x1fff),
MSM_ROTATOR_SRC_IMAGE_SIZE);
/* rotator expects YCbCr for planar input format */
if ((pass2_src_format == MDP_Y_CR_CB_H2V2 ||
pass2_src_format == MDP_Y_CR_CB_GH2V2) &&
rotator_hw_revision < ROTATOR_REVISION_V2)
swap(mrd->chroma_rot_buf->read_addr,
mrd->chroma2_rot_buf->read_addr);
iowrite32(mrd->y_rot_buf->read_addr, MSM_ROTATOR_SRCP0_ADDR);
iowrite32(mrd->chroma_rot_buf->read_addr,
MSM_ROTATOR_SRCP1_ADDR);
if (mrd->chroma2_rot_buf->read_addr)
iowrite32(mrd->chroma2_rot_buf->read_addr,
MSM_ROTATOR_SRCP2_ADDR);
if (post_pass1_buf_is_planar) {
if (pass2_src_format == MDP_Y_CR_CB_GH2V2) {
iowrite32(ALIGN(post_pass1_ystride, 16) |
ALIGN((post_pass1_ystride / 2), 16) << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
iowrite32(ALIGN((post_pass1_ystride / 2), 16),
MSM_ROTATOR_SRC_YSTRIDE2);
} else {
iowrite32(post_pass1_ystride |
(post_pass1_ystride / 2) << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
iowrite32((post_pass1_ystride / 2),
MSM_ROTATOR_SRC_YSTRIDE2);
}
} else {
iowrite32(post_pass1_ystride |
post_pass1_ystride << 16,
MSM_ROTATOR_SRC_YSTRIDE1);
}
iowrite32(out_paddr +
((info->dst_y * info->dst.width) + info->dst_x),
MSM_ROTATOR_OUTP0_ADDR);
iowrite32(out_chroma_paddr +
(((info->dst_y * info->dst.width)/2) + info->dst_x),
MSM_ROTATOR_OUTP1_ADDR);
if (out_chroma2_paddr)
iowrite32(out_chroma2_paddr +
(((info->dst_y * info->dst.width)/2) + info->dst_x),
MSM_ROTATOR_OUTP2_ADDR);
if (new_session) {
if (out_chroma2_paddr) {
if (info->dst.format == MDP_Y_CR_CB_GH2V2) {
iowrite32(ALIGN(info->dst.width, 16) |
ALIGN((info->dst.width / 2), 16) << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
iowrite32(ALIGN((info->dst.width / 2), 16),
MSM_ROTATOR_OUT_YSTRIDE2);
} else {
iowrite32(info->dst.width |
info->dst.width/2 << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
iowrite32(info->dst.width/2,
MSM_ROTATOR_OUT_YSTRIDE2);
}
} else {
iowrite32(info->dst.width |
info->dst.width << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
}
if (dst_format == MDP_Y_CBCR_H2V2 ||
dst_format == MDP_Y_CB_CR_H2V2) {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
} else {
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
}
iowrite32((3 << 18) | /* chroma sampling 3=4:2:0 */
(ROTATIONS_TO_BITMASK(info->rotations) << 9) |
1 << 8 | /* ROT_EN */
fast_yuv_en << 4 | /*fast YUV*/
0 << 2 | /* No downscale in Pass-2 */
0, /* No downscale in Pass-2 */
MSM_ROTATOR_SUB_BLOCK_CFG);
iowrite32(0 << 29 |
/* Output of Pass-1 will always be non-tiled */
(use_imem ? 0 : 1) << 22 | /* tile size */
(in_chroma2_paddr ? 1 : 2) << 19 | /* fetch planes */
0 << 18 | /* unpack align */
1 << 17 | /* unpack tight */
1 << 13 | /* unpack count 0=1 component */
0 << 9 | /* src Bpp 0=1 byte ... */
0 << 8 | /* has alpha */
0 << 6 | /* alpha bits 3=8bits */
3 << 4 | /* R/Cr bits 1=5 2=6 3=8 */
3 << 2 | /* B/Cb bits 1=5 2=6 3=8 */
3 << 0, /* G/Y bits 1=5 2=6 3=8 */
MSM_ROTATOR_SRC_FORMAT);
}
return 0;
}
static int msm_rotator_ycrycb(struct msm_rotator_img_info *info,
unsigned int in_paddr,
unsigned int out_paddr,
unsigned int use_imem,
int new_session,
unsigned int out_chroma_paddr)
{
int bpp;
uint32_t dst_format;
if (info->src.format == MDP_YCRYCB_H2V1) {
if (info->rotations & MDP_ROT_90)
dst_format = MDP_Y_CRCB_H1V2;
else
dst_format = MDP_Y_CRCB_H2V1;
} else
return -EINVAL;
if (info->dst.format != dst_format)
return -EINVAL;
bpp = get_bpp(info->src.format);
if (bpp < 0)
return -ENOTTY;
iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
iowrite32(out_paddr +
((info->dst_y * info->dst.width) + info->dst_x),
MSM_ROTATOR_OUTP0_ADDR);
iowrite32(out_chroma_paddr +
((info->dst_y * info->dst.width)/2 + info->dst_x),
MSM_ROTATOR_OUTP1_ADDR);
if (new_session) {
iowrite32(info->src.width * bpp,
MSM_ROTATOR_SRC_YSTRIDE1);
if (info->rotations & MDP_ROT_90)
iowrite32(info->dst.width |
(info->dst.width*2) << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
else
iowrite32(info->dst.width |
(info->dst.width) << 16,
MSM_ROTATOR_OUT_YSTRIDE1);
iowrite32(GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
iowrite32((1 << 18) | /* chroma sampling 1=H2V1 */
(ROTATIONS_TO_BITMASK(info->rotations) << 9) |
1 << 8 | /* ROT_EN */
info->downscale_ratio << 2 | /* downscale v ratio */
info->downscale_ratio, /* downscale h ratio */
MSM_ROTATOR_SUB_BLOCK_CFG);
iowrite32(0 << 29 | /* frame format 0 = linear */
(use_imem ? 0 : 1) << 22 | /* tile size */
0 << 19 | /* fetch planes 0=interleaved */
0 << 18 | /* unpack align */
1 << 17 | /* unpack tight */
3 << 13 | /* unpack count 0=1 component */
(bpp-1) << 9 | /* src Bpp 0=1 byte ... */
0 << 8 | /* has alpha */
0 << 6 | /* alpha bits 3=8bits */
3 << 4 | /* R/Cr bits 1=5 2=6 3=8 */
3 << 2 | /* B/Cb bits 1=5 2=6 3=8 */
3 << 0, /* G/Y bits 1=5 2=6 3=8 */
MSM_ROTATOR_SRC_FORMAT);
}
return 0;
}
static int msm_rotator_rgb_types(struct msm_rotator_img_info *info,
unsigned int in_paddr,
unsigned int out_paddr,
unsigned int use_imem,
int new_session)
{
int bpp, abits, rbits, gbits, bbits;
if (info->src.format != info->dst.format)
return -EINVAL;
bpp = get_bpp(info->src.format);
if (bpp < 0)
return -ENOTTY;
iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
iowrite32(out_paddr +
((info->dst_y * info->dst.width) + info->dst_x) * bpp,
MSM_ROTATOR_OUTP0_ADDR);
if (new_session) {
iowrite32(info->src.width * bpp, MSM_ROTATOR_SRC_YSTRIDE1);
iowrite32(info->dst.width * bpp, MSM_ROTATOR_OUT_YSTRIDE1);
iowrite32((0 << 18) | /* chroma sampling 0=rgb */
(ROTATIONS_TO_BITMASK(info->rotations) << 9) |
1 << 8 | /* ROT_EN */
info->downscale_ratio << 2 | /* downscale v ratio */
info->downscale_ratio, /* downscale h ratio */
MSM_ROTATOR_SUB_BLOCK_CFG);
switch (info->src.format) {
case MDP_RGB_565:
iowrite32(GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
abits = 0;
rbits = COMPONENT_5BITS;
gbits = COMPONENT_6BITS;
bbits = COMPONENT_5BITS;
break;
case MDP_BGR_565:
iowrite32(GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
abits = 0;
rbits = COMPONENT_5BITS;
gbits = COMPONENT_6BITS;
bbits = COMPONENT_5BITS;
break;
case MDP_RGB_888:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
iowrite32(GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
abits = 0;
rbits = COMPONENT_8BITS;
gbits = COMPONENT_8BITS;
bbits = COMPONENT_8BITS;
break;
case MDP_ARGB_8888:
case MDP_RGBA_8888:
case MDP_XRGB_8888:
case MDP_RGBX_8888:
iowrite32(GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G,
CLR_B, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G,
CLR_B, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
abits = COMPONENT_8BITS;
rbits = COMPONENT_8BITS;
gbits = COMPONENT_8BITS;
bbits = COMPONENT_8BITS;
break;
case MDP_BGRA_8888:
iowrite32(GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G,
CLR_R, 8),
MSM_ROTATOR_SRC_UNPACK_PATTERN1);
iowrite32(GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G,
CLR_R, 8),
MSM_ROTATOR_OUT_PACK_PATTERN1);
abits = COMPONENT_8BITS;
rbits = COMPONENT_8BITS;
gbits = COMPONENT_8BITS;
bbits = COMPONENT_8BITS;
break;
default:
return -EINVAL;
}
iowrite32(0 << 29 | /* frame format 0 = linear */
(use_imem ? 0 : 1) << 22 | /* tile size */
0 << 19 | /* fetch planes 0=interleaved */
0 << 18 | /* unpack align */
1 << 17 | /* unpack tight */
(abits ? 3 : 2) << 13 | /* unpack count 0=1 comp */
(bpp-1) << 9 | /* src Bpp 0=1 byte ... */
(abits ? 1 : 0) << 8 | /* has alpha */
abits << 6 | /* alpha bits 3=8bits */
rbits << 4 | /* R/Cr bits 1=5 2=6 3=8 */
bbits << 2 | /* B/Cb bits 1=5 2=6 3=8 */
gbits << 0, /* G/Y bits 1=5 2=6 3=8 */
MSM_ROTATOR_SRC_FORMAT);
}
return 0;
}
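/*
* get_img() resolves a memory_id to a physical address range through one
* of three backing stores, depending on the kernel configuration: a
* framebuffer fd (MDP_MEMORY_ID_TYPE_FB), an ION buffer mapped through
* the rotator IOMMU, or a PMEM file.
*/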
static int get_img(struct msmfb_data *fbd, int domain,
unsigned long *start, unsigned long *len, struct file **p_file,
int *p_need, struct ion_handle **p_ihdl, unsigned int secure)
{
int ret = 0;
#ifdef CONFIG_FB
struct file *file = NULL;
int put_needed, fb_num;
#endif
#ifdef CONFIG_ANDROID_PMEM
unsigned long vstart;
#endif
*p_need = 0;
#ifdef CONFIG_FB
if (fbd->flags & MDP_MEMORY_ID_TYPE_FB) {
file = fget_light(fbd->memory_id, &put_needed);
if (file == NULL) {
pr_err("fget_light returned NULL\n");
return -EINVAL;
}
if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
if (get_fb_phys_info(start, len, fb_num,
ROTATOR_SUBSYSTEM_ID)) {
pr_err("get_fb_phys_info() failed\n");
ret = -1;
} else {
*p_file = file;
*p_need = put_needed;
}
} else {
pr_err("invalid FB_MAJOR failed\n");
ret = -1;
}
if (ret)
fput_light(file, put_needed);
return ret;
}
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
return msm_rotator_iommu_map_buf(fbd->memory_id, domain, start,
len, p_ihdl, secure);
#endif
#ifdef CONFIG_ANDROID_PMEM
if (!get_pmem_file(fbd->memory_id, start, &vstart, len, p_file))
return 0;
else
return -ENOMEM;
#endif
}
static void put_img(struct file *p_file, struct ion_handle *p_ihdl,
int domain, unsigned int secure)
{
#ifdef CONFIG_ANDROID_PMEM
if (p_file != NULL)
put_pmem_file(p_file);
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
if (!IS_ERR_OR_NULL(p_ihdl)) {
pr_debug("%s(): p_ihdl %p\n", __func__, p_ihdl);
if (rot_iommu_split_domain) {
if (!secure)
ion_unmap_iommu(msm_rotator_dev->client,
p_ihdl, domain, GEN_POOL);
} else {
ion_unmap_iommu(msm_rotator_dev->client,
p_ihdl, ROTATOR_SRC_DOMAIN, GEN_POOL);
}
ion_free(msm_rotator_dev->client, p_ihdl);
}
#endif
}
static int msm_rotator_rotate_prepare(
struct msm_rotator_data_info *data_info,
struct msm_rotator_commit_info *commit_info)
{
unsigned int format;
struct msm_rotator_data_info info;
unsigned int in_paddr, out_paddr;
unsigned long src_len, dst_len;
int rc = 0, s;
struct file *srcp0_file = NULL, *dstp0_file = NULL;
struct file *srcp1_file = NULL, *dstp1_file = NULL;
struct ion_handle *srcp0_ihdl = NULL, *dstp0_ihdl = NULL;
struct ion_handle *srcp1_ihdl = NULL, *dstp1_ihdl = NULL;
int ps0_need, p_need;
unsigned int in_chroma_paddr = 0, out_chroma_paddr = 0;
unsigned int in_chroma2_paddr = 0, out_chroma2_paddr = 0;
struct msm_rotator_img_info *img_info;
struct msm_rotator_mem_planes src_planes, dst_planes;
mutex_lock(&msm_rotator_dev->rotator_lock);
info = *data_info;
for (s = 0; s < MAX_SESSIONS; s++)
if ((msm_rotator_dev->rot_session[s] != NULL) &&
(info.session_id ==
(unsigned int)msm_rotator_dev->rot_session[s]
))
break;
if (s == MAX_SESSIONS) {
pr_err("%s() : Attempt to use invalid session_id %d\n",
__func__, s);
rc = -EINVAL;
mutex_unlock(&msm_rotator_dev->rotator_lock);
return rc;
}
img_info = &(msm_rotator_dev->rot_session[s]->img_info);
if (img_info->enable == 0) {
dev_dbg(msm_rotator_dev->device,
"%s() : Session_id %d not enabled\n", __func__, s);
rc = -EINVAL;
mutex_unlock(&msm_rotator_dev->rotator_lock);
return rc;
}
if (msm_rotator_get_plane_sizes(img_info->src.format,
img_info->src.width,
img_info->src.height,
&src_planes)) {
pr_err("%s: invalid src format\n", __func__);
rc = -EINVAL;
mutex_unlock(&msm_rotator_dev->rotator_lock);
return rc;
}
if (msm_rotator_get_plane_sizes(img_info->dst.format,
img_info->dst.width,
img_info->dst.height,
&dst_planes)) {
pr_err("%s: invalid dst format\n", __func__);
rc = -EINVAL;
mutex_unlock(&msm_rotator_dev->rotator_lock);
return rc;
}
rc = get_img(&info.src, ROTATOR_SRC_DOMAIN, (unsigned long *)&in_paddr,
(unsigned long *)&src_len, &srcp0_file, &ps0_need,
&srcp0_ihdl, 0);
if (rc) {
pr_err("%s: in get_img() failed id=0x%08x\n",
DRIVER_NAME, info.src.memory_id);
goto rotate_prepare_error;
}
rc = get_img(&info.dst, ROTATOR_DST_DOMAIN, (unsigned long *)&out_paddr,
(unsigned long *)&dst_len, &dstp0_file, &p_need,
&dstp0_ihdl, img_info->secure);
if (rc) {
pr_err("%s: out get_img() failed id=0x%08x\n",
DRIVER_NAME, info.dst.memory_id);
goto rotate_prepare_error;
}
format = img_info->src.format;
if (((info.version_key & VERSION_KEY_MASK) == 0xA5B4C300) &&
((info.version_key & ~VERSION_KEY_MASK) > 0) &&
(src_planes.num_planes == 2)) {
if (checkoffset(info.src.offset,
src_planes.plane_size[0],
src_len)) {
pr_err("%s: invalid src buffer (len=%lu offset=%x)\n",
__func__, src_len, info.src.offset);
rc = -ERANGE;
goto rotate_prepare_error;
}
if (checkoffset(info.dst.offset,
dst_planes.plane_size[0],
dst_len)) {
pr_err("%s: invalid dst buffer (len=%lu offset=%x)\n",
__func__, dst_len, info.dst.offset);
rc = -ERANGE;
goto rotate_prepare_error;
}
rc = get_img(&info.src_chroma, ROTATOR_SRC_DOMAIN,
(unsigned long *)&in_chroma_paddr,
(unsigned long *)&src_len, &srcp1_file, &p_need,
&srcp1_ihdl, 0);
if (rc) {
pr_err("%s: in chroma get_img() failed id=0x%08x\n",
DRIVER_NAME, info.src_chroma.memory_id);
goto rotate_prepare_error;
}
rc = get_img(&info.dst_chroma, ROTATOR_DST_DOMAIN,
(unsigned long *)&out_chroma_paddr,
(unsigned long *)&dst_len, &dstp1_file, &p_need,
&dstp1_ihdl, img_info->secure);
if (rc) {
pr_err("%s: out chroma get_img() failed id=0x%08x\n",
DRIVER_NAME, info.dst_chroma.memory_id);
goto rotate_prepare_error;
}
if (checkoffset(info.src_chroma.offset,
src_planes.plane_size[1],
src_len)) {
pr_err("%s: invalid chr src buf len=%lu offset=%x\n",
__func__, src_len, info.src_chroma.offset);
rc = -ERANGE;
goto rotate_prepare_error;
}
if (checkoffset(info.dst_chroma.offset,
src_planes.plane_size[1],
dst_len)) {
pr_err("%s: invalid chr dst buf len=%lu offset=%x\n",
__func__, dst_len, info.dst_chroma.offset);
rc = -ERANGE;
goto rotate_prepare_error;
}
in_chroma_paddr += info.src_chroma.offset;
out_chroma_paddr += info.dst_chroma.offset;
} else {
if (checkoffset(info.src.offset,
src_planes.total_size,
src_len)) {
pr_err("%s: invalid src buffer (len=%lu offset=%x)\n",
__func__, src_len, info.src.offset);
rc = -ERANGE;
goto rotate_prepare_error;
}
if (checkoffset(info.dst.offset,
dst_planes.total_size,
dst_len)) {
pr_err("%s: invalid dst buffer (len=%lu offset=%x)\n",
__func__, dst_len, info.dst.offset);
rc = -ERANGE;
goto rotate_prepare_error;
}
}
in_paddr += info.src.offset;
out_paddr += info.dst.offset;
if (!in_chroma_paddr && src_planes.num_planes >= 2)
in_chroma_paddr = in_paddr + src_planes.plane_size[0];
if (!out_chroma_paddr && dst_planes.num_planes >= 2)
out_chroma_paddr = out_paddr + dst_planes.plane_size[0];
if (src_planes.num_planes >= 3)
in_chroma2_paddr = in_chroma_paddr + src_planes.plane_size[1];
if (dst_planes.num_planes >= 3)
out_chroma2_paddr = out_chroma_paddr + dst_planes.plane_size[1];
commit_info->data_info = info;
commit_info->img_info = *img_info;
commit_info->format = format;
commit_info->in_paddr = in_paddr;
commit_info->out_paddr = out_paddr;
commit_info->in_chroma_paddr = in_chroma_paddr;
commit_info->out_chroma_paddr = out_chroma_paddr;
commit_info->in_chroma2_paddr = in_chroma2_paddr;
commit_info->out_chroma2_paddr = out_chroma2_paddr;
commit_info->srcp0_file = srcp0_file;
commit_info->srcp1_file = srcp1_file;
commit_info->srcp0_ihdl = srcp0_ihdl;
commit_info->srcp1_ihdl = srcp1_ihdl;
commit_info->dstp0_file = dstp0_file;
commit_info->dstp0_ihdl = dstp0_ihdl;
commit_info->dstp1_file = dstp1_file;
commit_info->dstp1_ihdl = dstp1_ihdl;
commit_info->ps0_need = ps0_need;
commit_info->session_index = s;
commit_info->acq_fen = msm_rotator_dev->sync_info[s].acq_fen;
commit_info->fast_yuv_en = mrd->rot_session[s]->fast_yuv_enable;
commit_info->enable_2pass = mrd->rot_session[s]->enable_2pass;
mutex_unlock(&msm_rotator_dev->rotator_lock);
return 0;
rotate_prepare_error:
put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN,
msm_rotator_dev->rot_session[s]->img_info.secure);
put_img(srcp1_file, srcp1_ihdl, ROTATOR_SRC_DOMAIN, 0);
put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN,
msm_rotator_dev->rot_session[s]->img_info.secure);
/* only source may use frame buffer */
if (info.src.flags & MDP_MEMORY_ID_TYPE_FB)
fput_light(srcp0_file, ps0_need);
else
put_img(srcp0_file, srcp0_ihdl, ROTATOR_SRC_DOMAIN, 0);
dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n",
__func__, rc);
mutex_unlock(&msm_rotator_dev->rotator_lock);
return rc;
}
static int msm_rotator_do_rotate_sub(
struct msm_rotator_commit_info *commit_info)
{
unsigned int status, format;
struct msm_rotator_data_info info;
unsigned int in_paddr, out_paddr;
int use_imem = 0, rc = 0;
struct file *srcp0_file, *dstp0_file;
struct file *srcp1_file, *dstp1_file;
struct ion_handle *srcp0_ihdl, *dstp0_ihdl;
struct ion_handle *srcp1_ihdl, *dstp1_ihdl;
int s, ps0_need;
unsigned int in_chroma_paddr, out_chroma_paddr;
unsigned int in_chroma2_paddr, out_chroma2_paddr;
struct msm_rotator_img_info *img_info;
mutex_lock(&msm_rotator_dev->rotator_lock);
info = commit_info->data_info;
img_info = &commit_info->img_info;
format = commit_info->format;
in_paddr = commit_info->in_paddr;
out_paddr = commit_info->out_paddr;
in_chroma_paddr = commit_info->in_chroma_paddr;
out_chroma_paddr = commit_info->out_chroma_paddr;
in_chroma2_paddr = commit_info->in_chroma2_paddr;
out_chroma2_paddr = commit_info->out_chroma2_paddr;
srcp0_file = commit_info->srcp0_file;
srcp1_file = commit_info->srcp1_file;
srcp0_ihdl = commit_info->srcp0_ihdl;
srcp1_ihdl = commit_info->srcp1_ihdl;
dstp0_file = commit_info->dstp0_file;
dstp0_ihdl = commit_info->dstp0_ihdl;
dstp1_file = commit_info->dstp1_file;
dstp1_ihdl = commit_info->dstp1_ihdl;
ps0_need = commit_info->ps0_need;
s = commit_info->session_index;
msm_rotator_wait_for_fence(commit_info->acq_fen);
commit_info->acq_fen = NULL;
cancel_delayed_work_sync(&msm_rotator_dev->rot_clk_work);
if (msm_rotator_dev->rot_clk_state != CLK_EN) {
enable_rot_clks();
msm_rotator_dev->rot_clk_state = CLK_EN;
}
enable_irq(msm_rotator_dev->irq);
#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
use_imem = msm_rotator_imem_allocate(ROTATOR_REQUEST);
#else
use_imem = 0;
#endif
/*
* workaround for a hardware bug: the rotator hardware hangs when a
* write burst beat size of 16 is used in 128x128 tile fetch mode. As a
* temporary fix, use 0x42 for BURST_SIZE when imem is used.
*/
if (use_imem)
iowrite32(0x42, MSM_ROTATOR_MAX_BURST_SIZE);
iowrite32(((img_info->src_rect.h & 0x1fff)
<< 16) |
(img_info->src_rect.w & 0x1fff),
MSM_ROTATOR_SRC_SIZE);
iowrite32(((img_info->src_rect.y & 0x1fff)
<< 16) |
(img_info->src_rect.x & 0x1fff),
MSM_ROTATOR_SRC_XY);
iowrite32(((img_info->src.height & 0x1fff)
<< 16) |
(img_info->src.width & 0x1fff),
MSM_ROTATOR_SRC_IMAGE_SIZE);
switch (format) {
case MDP_RGB_565:
case MDP_BGR_565:
case MDP_RGB_888:
case MDP_ARGB_8888:
case MDP_RGBA_8888:
case MDP_XRGB_8888:
case MDP_BGRA_8888:
case MDP_RGBX_8888:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
rc = msm_rotator_rgb_types(img_info,
in_paddr, out_paddr,
use_imem,
msm_rotator_dev->last_session_idx
!= s);
break;
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CB_CR_H2V2:
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CRCB_H2V2_TILE:
case MDP_Y_CBCR_H2V2_TILE:
if (!commit_info->enable_2pass)
rc = msm_rotator_ycxcx_h2v2(img_info,
in_paddr, out_paddr, use_imem,
msm_rotator_dev->last_session_idx
!= s,
in_chroma_paddr,
out_chroma_paddr,
in_chroma2_paddr,
out_chroma2_paddr,
commit_info->fast_yuv_en);
else
rc = msm_rotator_ycxcx_h2v2_2pass(img_info,
in_paddr, out_paddr, use_imem,
msm_rotator_dev->last_session_idx
!= s,
in_chroma_paddr,
out_chroma_paddr,
in_chroma2_paddr,
out_chroma2_paddr,
commit_info->fast_yuv_en,
commit_info->enable_2pass,
s);
break;
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H2V1:
rc = msm_rotator_ycxcx_h2v1(img_info,
in_paddr, out_paddr, use_imem,
msm_rotator_dev->last_session_idx
!= s,
in_chroma_paddr,
out_chroma_paddr);
break;
case MDP_YCRYCB_H2V1:
rc = msm_rotator_ycrycb(img_info,
in_paddr, out_paddr, use_imem,
msm_rotator_dev->last_session_idx != s,
out_chroma_paddr);
break;
default:
rc = -EINVAL;
pr_err("%s(): Unsupported format %u\n", __func__, format);
goto do_rotate_exit;
}
if (rc != 0) {
msm_rotator_dev->last_session_idx = INVALID_SESSION;
pr_err("%s(): Invalid session error\n", __func__);
goto do_rotate_exit;
}
iowrite32(3, MSM_ROTATOR_INTR_ENABLE);
msm_rotator_dev->processing = 1;
iowrite32(0x1, MSM_ROTATOR_START);
wait_event(msm_rotator_dev->wq,
(msm_rotator_dev->processing == 0));
status = (unsigned char)ioread32(MSM_ROTATOR_INTR_STATUS);
if ((status & 0x03) != 0x01) {
pr_err("%s(): AXI Bus Error, issuing SW_RESET\n", __func__);
iowrite32(0x1, MSM_ROTATOR_SW_RESET);
rc = -EFAULT;
}
iowrite32(0, MSM_ROTATOR_INTR_ENABLE);
iowrite32(3, MSM_ROTATOR_INTR_CLEAR);
do_rotate_exit:
disable_irq(msm_rotator_dev->irq);
#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
msm_rotator_imem_free(ROTATOR_REQUEST);
#endif
schedule_delayed_work(&msm_rotator_dev->rot_clk_work, HZ);
put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN,
img_info->secure);
put_img(srcp1_file, srcp1_ihdl, ROTATOR_SRC_DOMAIN, 0);
put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN,
img_info->secure);
/* only source may use frame buffer */
if (info.src.flags & MDP_MEMORY_ID_TYPE_FB)
fput_light(srcp0_file, ps0_need);
else
put_img(srcp0_file, srcp0_ihdl, ROTATOR_SRC_DOMAIN, 0);
msm_rotator_signal_timeline_done(s);
mutex_unlock(&msm_rotator_dev->rotator_lock);
dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n",
__func__, rc);
return rc;
}
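/*
* Block until the commit queue either drains completely (is_all) or has a
* free slot (!is_all); on an is_all drain, or after a failed wait, the
* queue read/write pointers and count are reset.
*/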
static void rot_wait_for_commit_queue(u32 is_all)
{
int ret = 0;
u32 loop_cnt = 0;
while (1) {
mutex_lock(&mrd->commit_mutex);
if (is_all && (atomic_read(&mrd->commit_q_cnt) == 0))
break;
if ((!is_all) &&
(atomic_read(&mrd->commit_q_cnt) < MAX_COMMIT_QUEUE))
break;
INIT_COMPLETION(mrd->commit_comp);
mutex_unlock(&mrd->commit_mutex);
ret = wait_for_completion_interruptible_timeout(
&mrd->commit_comp,
msecs_to_jiffies(WAIT_ROT_TIMEOUT));
if ((ret <= 0) ||
(atomic_read(&mrd->commit_q_cnt) >= MAX_COMMIT_QUEUE) ||
(loop_cnt > MAX_COMMIT_QUEUE)) {
pr_err("%s wait for commit queue failed ret=%d pointers:%d %d",
__func__, ret, atomic_read(&mrd->commit_q_r),
atomic_read(&mrd->commit_q_w));
mutex_lock(&mrd->commit_mutex);
ret = -ETIME;
break;
} else {
ret = 0;
}
loop_cnt++;
}
if (is_all || ret) {
atomic_set(&mrd->commit_q_r, 0);
atomic_set(&mrd->commit_q_cnt, 0);
atomic_set(&mrd->commit_q_w, 0);
}
mutex_unlock(&mrd->commit_mutex);
}
static int msm_rotator_do_rotate(unsigned long arg)
{
struct msm_rotator_data_info info;
struct rot_sync_info *sync_info;
int session_index, ret;
int commit_q_w;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
rot_wait_for_commit_queue(false);
mutex_lock(&mrd->commit_mutex);
commit_q_w = atomic_read(&mrd->commit_q_w);
ret = msm_rotator_rotate_prepare(&info,
&mrd->commit_info[commit_q_w]);
if (ret) {
mutex_unlock(&mrd->commit_mutex);
return ret;
}
session_index = mrd->commit_info[commit_q_w].session_index;
sync_info = &msm_rotator_dev->sync_info[session_index];
mutex_lock(&sync_info->sync_mutex);
atomic_inc(&sync_info->queue_buf_cnt);
sync_info->acq_fen = NULL;
mutex_unlock(&sync_info->sync_mutex);
if (atomic_inc_return(&mrd->commit_q_w) >= MAX_COMMIT_QUEUE)
atomic_set(&mrd->commit_q_w, 0);
atomic_inc(&mrd->commit_q_cnt);
schedule_work(&mrd->commit_work);
mutex_unlock(&mrd->commit_mutex);
if (info.wait_for_finish)
rot_wait_for_commit_queue(true);
return 0;
}
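/*
* Commit work queue handler: drains the commits queued by
* msm_rotator_do_rotate() in FIFO order via msm_rotator_do_rotate_sub()
* and wakes any waiters on commit_comp (see rot_wait_for_commit_queue())
* as each entry is consumed.
*/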
static void rot_commit_wq_handler(struct work_struct *work)
{
mutex_lock(&mrd->commit_wq_mutex);
mutex_lock(&mrd->commit_mutex);
while (atomic_read(&mrd->commit_q_cnt) > 0) {
mrd->commit_running = true;
mutex_unlock(&mrd->commit_mutex);
msm_rotator_do_rotate_sub(
&mrd->commit_info[atomic_read(&mrd->commit_q_r)]);
mutex_lock(&mrd->commit_mutex);
if (atomic_read(&mrd->commit_q_cnt) > 0) {
atomic_dec(&mrd->commit_q_cnt);
if (atomic_inc_return(&mrd->commit_q_r) >=
MAX_COMMIT_QUEUE)
atomic_set(&mrd->commit_q_r, 0);
}
complete_all(&mrd->commit_comp);
}
mrd->commit_running = false;
if (atomic_read(&mrd->commit_q_r) != atomic_read(&mrd->commit_q_w))
pr_err("%s invalid state: r=%d w=%d cnt=%d", __func__,
atomic_read(&mrd->commit_q_r),
atomic_read(&mrd->commit_q_w),
atomic_read(&mrd->commit_q_cnt));
mutex_unlock(&mrd->commit_mutex);
mutex_unlock(&mrd->commit_wq_mutex);
}
static void msm_rotator_set_perf_level(u32 wh, u32 is_rgb)
{
u32 perf_level;
if (is_rgb)
perf_level = 1;
else if (wh <= (640 * 480))
perf_level = 2;
else if (wh <= (736 * 1280))
perf_level = 3;
else
perf_level = 4;
#ifdef CONFIG_MSM_BUS_SCALING
msm_bus_scale_client_update_request(msm_rotator_dev->bus_client_handle,
perf_level);
#endif
}
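/*
* Illustrative mapping only: a hypothetical 1280x720 YUV surface gives
* wh = 921600 <= 736 * 1280 = 942080 and therefore requests perf_level 3,
* while any RGB surface always requests perf_level 1.
*/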
static int rot_enable_iommu_clocks(struct msm_rotator_dev *rot_dev)
{
int ret = 0, i;
if (rot_dev->mmu_clk_on)
return 0;
for (i = 0; i < ARRAY_SIZE(rot_mmu_clks); i++) {
rot_mmu_clks[i].mmu_clk = clk_get(&msm_rotator_dev->pdev->dev,
rot_mmu_clks[i].mmu_clk_name);
if (IS_ERR(rot_mmu_clks[i].mmu_clk)) {
pr_err(" %s: Get failed for clk %s", __func__,
rot_mmu_clks[i].mmu_clk_name);
ret = PTR_ERR(rot_mmu_clks[i].mmu_clk);
break;
}
ret = clk_prepare_enable(rot_mmu_clks[i].mmu_clk);
if (ret) {
clk_put(rot_mmu_clks[i].mmu_clk);
rot_mmu_clks[i].mmu_clk = NULL;
}
}
if (ret) {
for (i--; i >= 0; i--) {
clk_disable_unprepare(rot_mmu_clks[i].mmu_clk);
clk_put(rot_mmu_clks[i].mmu_clk);
rot_mmu_clks[i].mmu_clk = NULL;
}
} else {
rot_dev->mmu_clk_on = 1;
}
return ret;
}
static int rot_disable_iommu_clocks(struct msm_rotator_dev *rot_dev)
{
int i;
if (!rot_dev->mmu_clk_on)
return 0;
for (i = 0; i < ARRAY_SIZE(rot_mmu_clks); i++) {
clk_disable_unprepare(rot_mmu_clks[i].mmu_clk);
clk_put(rot_mmu_clks[i].mmu_clk);
rot_mmu_clks[i].mmu_clk = NULL;
}
rot_dev->mmu_clk_on = 0;
return 0;
}
static int map_sec_resource(struct msm_rotator_dev *rot_dev)
{
int ret = 0;
if (rot_dev->sec_mapped)
return 0;
ret = rot_enable_iommu_clocks(rot_dev);
if (ret) {
pr_err("IOMMU clock enabled failed while open");
return ret;
}
ret = msm_ion_secure_heap(ION_HEAP(ION_CP_MM_HEAP_ID));
if (ret)
pr_err("ION heap secure failed heap id %d ret %d\n",
ION_CP_MM_HEAP_ID, ret);
else
rot_dev->sec_mapped = 1;
rot_disable_iommu_clocks(rot_dev);
return ret;
}
static int unmap_sec_resource(struct msm_rotator_dev *rot_dev)
{
int ret = 0;
ret = rot_enable_iommu_clocks(rot_dev);
if (ret) {
pr_err("IOMMU clock enabled failed while close\n");
return ret;
}
msm_ion_unsecure_heap(ION_HEAP(ION_CP_MM_HEAP_ID));
rot_dev->sec_mapped = 0;
rot_disable_iommu_clocks(rot_dev);
return ret;
}
static int msm_rotator_start(unsigned long arg,
struct msm_rotator_fd_info *fd_info)
{
struct msm_rotator_img_info info;
struct msm_rotator_session *rot_session = NULL;
int rc = 0;
int s, is_rgb = 0;
int first_free_idx = INVALID_SESSION;
unsigned int dst_w, dst_h;
unsigned int is_planar420 = 0;
int fast_yuv_en = 0, enable_2pass = 0;
struct rot_sync_info *sync_info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
if ((info.rotations > MSM_ROTATOR_MAX_ROT) ||
(info.src.height > MSM_ROTATOR_MAX_H) ||
(info.src.width > MSM_ROTATOR_MAX_W) ||
(info.dst.height > MSM_ROTATOR_MAX_H) ||
(info.dst.width > MSM_ROTATOR_MAX_W) ||
(info.downscale_ratio > MAX_DOWNSCALE_RATIO)) {
pr_err("%s: Invalid parameters\n", __func__);
return -EINVAL;
}
if (info.rotations & MDP_ROT_90) {
dst_w = info.src_rect.h >> info.downscale_ratio;
dst_h = info.src_rect.w >> info.downscale_ratio;
} else {
dst_w = info.src_rect.w >> info.downscale_ratio;
dst_h = info.src_rect.h >> info.downscale_ratio;
}
if (checkoffset(info.src_rect.x, info.src_rect.w, info.src.width) ||
checkoffset(info.src_rect.y, info.src_rect.h, info.src.height) ||
checkoffset(info.dst_x, dst_w, info.dst.width) ||
checkoffset(info.dst_y, dst_h, info.dst.height)) {
pr_err("%s: Invalid src or dst rect\n", __func__);
return -ERANGE;
}
switch (info.src.format) {
case MDP_Y_CB_CR_H2V2:
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
is_planar420 = 1;
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CRCB_H2V2_TILE:
case MDP_Y_CBCR_H2V2_TILE:
if (rotator_hw_revision >= ROTATOR_REVISION_V2) {
if (info.downscale_ratio &&
(info.rotations & MDP_ROT_90)) {
fast_yuv_en = !fast_yuv_invalid_size_checker(
0,
info.src.width,
info.src_rect.w >>
info.downscale_ratio,
info.src.height,
info.src_rect.h >>
info.downscale_ratio,
info.src_rect.w >>
info.downscale_ratio,
is_planar420);
fast_yuv_en = fast_yuv_en &&
!fast_yuv_invalid_size_checker(
info.rotations,
info.src_rect.w >>
info.downscale_ratio,
dst_w,
info.src_rect.h >>
info.downscale_ratio,
dst_h,
dst_w,
is_planar420);
} else {
fast_yuv_en = !fast_yuv_invalid_size_checker(
info.rotations,
info.src.width,
dst_w,
info.src.height,
dst_h,
dst_w,
is_planar420);
}
if (fast_yuv_en && info.downscale_ratio &&
(info.rotations & MDP_ROT_90))
enable_2pass = 1;
}
break;
default:
fast_yuv_en = 0;
}
switch (info.src.format) {
case MDP_RGB_565:
case MDP_BGR_565:
case MDP_RGB_888:
case MDP_ARGB_8888:
case MDP_RGBA_8888:
case MDP_XRGB_8888:
case MDP_RGBX_8888:
case MDP_BGRA_8888:
is_rgb = 1;
info.dst.format = info.src.format;
break;
case MDP_Y_CBCR_H2V1:
if (info.rotations & MDP_ROT_90) {
info.dst.format = MDP_Y_CBCR_H1V2;
break;
}
case MDP_Y_CRCB_H2V1:
if (info.rotations & MDP_ROT_90) {
info.dst.format = MDP_Y_CRCB_H1V2;
break;
}
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H2V2:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
info.dst.format = info.src.format;
break;
case MDP_YCRYCB_H2V1:
if (info.rotations & MDP_ROT_90)
info.dst.format = MDP_Y_CRCB_H1V2;
else
info.dst.format = MDP_Y_CRCB_H2V1;
break;
case MDP_Y_CB_CR_H2V2:
if (fast_yuv_en) {
info.dst.format = info.src.format;
break;
}
case MDP_Y_CBCR_H2V2_TILE:
info.dst.format = MDP_Y_CBCR_H2V2;
break;
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
if (fast_yuv_en) {
info.dst.format = info.src.format;
break;
}
case MDP_Y_CRCB_H2V2_TILE:
info.dst.format = MDP_Y_CRCB_H2V2;
break;
default:
return -EINVAL;
}
mutex_lock(&msm_rotator_dev->rotator_lock);
msm_rotator_set_perf_level((info.src.width*info.src.height), is_rgb);
for (s = 0; s < MAX_SESSIONS; s++) {
if ((msm_rotator_dev->rot_session[s] != NULL) &&
(info.session_id ==
(unsigned int)msm_rotator_dev->rot_session[s]
)) {
rot_session = msm_rotator_dev->rot_session[s];
rot_session->img_info = info;
rot_session->fd_info = *fd_info;
rot_session->fast_yuv_enable = fast_yuv_en;
rot_session->enable_2pass = enable_2pass;
if (msm_rotator_dev->last_session_idx == s)
msm_rotator_dev->last_session_idx =
INVALID_SESSION;
break;
}
if ((msm_rotator_dev->rot_session[s] == NULL) &&
(first_free_idx == INVALID_SESSION))
first_free_idx = s;
}
if ((s == MAX_SESSIONS) && (first_free_idx != INVALID_SESSION)) {
/* allocate a session id */
msm_rotator_dev->rot_session[first_free_idx] =
kzalloc(sizeof(struct msm_rotator_session),
GFP_KERNEL);
if (!msm_rotator_dev->rot_session[first_free_idx]) {
printk(KERN_ERR "%s : unable to alloc mem\n",
__func__);
rc = -ENOMEM;
goto rotator_start_exit;
}
info.session_id = (unsigned int)
msm_rotator_dev->rot_session[first_free_idx];
rot_session = msm_rotator_dev->rot_session[first_free_idx];
rot_session->img_info = info;
rot_session->fd_info = *fd_info;
rot_session->fast_yuv_enable = fast_yuv_en;
rot_session->enable_2pass = enable_2pass;
if (!IS_ERR_OR_NULL(mrd->client)) {
if (rot_session->img_info.secure) {
rot_session->mem_hid &= ~BIT(ION_IOMMU_HEAP_ID);
rot_session->mem_hid |= BIT(ION_CP_MM_HEAP_ID);
rot_session->mem_hid |= ION_SECURE;
} else {
rot_session->mem_hid &= ~BIT(ION_CP_MM_HEAP_ID);
rot_session->mem_hid &= ~ION_SECURE;
rot_session->mem_hid |= BIT(ION_IOMMU_HEAP_ID);
}
}
s = first_free_idx;
} else if (s == MAX_SESSIONS) {
dev_dbg(msm_rotator_dev->device, "%s: all sessions in use\n",
__func__);
rc = -EBUSY;
}
if (rc == 0 && copy_to_user((void __user *)arg, &info, sizeof(info)))
rc = -EFAULT;
if ((rc == 0) && (info.secure))
map_sec_resource(msm_rotator_dev);
sync_info = &msm_rotator_dev->sync_info[s];
if ((rc == 0) && (sync_info->initialized == false)) {
char timeline_name[MAX_TIMELINE_NAME_LEN];
if (sync_info->timeline == NULL) {
snprintf(timeline_name, sizeof(timeline_name),
"msm_rot_%d", first_free_idx);
sync_info->timeline =
sw_sync_timeline_create(timeline_name);
if (sync_info->timeline == NULL)
pr_err("%s: cannot create %s time line",
__func__, timeline_name);
sync_info->timeline_value = 0;
}
mutex_init(&sync_info->sync_mutex);
sync_info->initialized = true;
}
sync_info->acq_fen = NULL;
atomic_set(&sync_info->queue_buf_cnt, 0);
rotator_start_exit:
mutex_unlock(&msm_rotator_dev->rotator_lock);
return rc;
}
static int msm_rotator_finish(unsigned long arg)
{
int rc = 0;
int s;
unsigned int session_id;
if (copy_from_user(&session_id, (void __user *)arg, sizeof(s)))
return -EFAULT;
rot_wait_for_commit_queue(true);
mutex_lock(&msm_rotator_dev->rotator_lock);
for (s = 0; s < MAX_SESSIONS; s++) {
if ((msm_rotator_dev->rot_session[s] != NULL) &&
(session_id ==
(unsigned int)msm_rotator_dev->rot_session[s])) {
if (msm_rotator_dev->last_session_idx == s)
msm_rotator_dev->last_session_idx =
INVALID_SESSION;
msm_rotator_signal_timeline(s);
msm_rotator_release_acq_fence(s);
if (msm_rotator_dev->rot_session[s]->enable_2pass) {
rotator_free_2pass_buf(mrd->y_rot_buf, s);
rotator_free_2pass_buf(mrd->chroma_rot_buf, s);
rotator_free_2pass_buf(mrd->chroma2_rot_buf, s);
}
kfree(msm_rotator_dev->rot_session[s]);
msm_rotator_dev->rot_session[s] = NULL;
break;
}
}
if (s == MAX_SESSIONS)
rc = -EINVAL;
#ifdef CONFIG_MSM_BUS_SCALING
msm_bus_scale_client_update_request(msm_rotator_dev->bus_client_handle,
0);
#endif
if (msm_rotator_dev->sec_mapped)
unmap_sec_resource(msm_rotator_dev);
mutex_unlock(&msm_rotator_dev->rotator_lock);
return rc;
}
static int
msm_rotator_open(struct inode *inode, struct file *filp)
{
struct msm_rotator_fd_info *tmp, *fd_info = NULL;
int i;
if (filp->private_data)
return -EBUSY;
mutex_lock(&msm_rotator_dev->rotator_lock);
for (i = 0; i < MAX_SESSIONS; i++) {
if (msm_rotator_dev->rot_session[i] == NULL)
break;
}
if (i == MAX_SESSIONS) {
mutex_unlock(&msm_rotator_dev->rotator_lock);
return -EBUSY;
}
list_for_each_entry(tmp, &msm_rotator_dev->fd_list, list) {
if (tmp->pid == current->pid) {
fd_info = tmp;
break;
}
}
if (!fd_info) {
fd_info = kzalloc(sizeof(*fd_info), GFP_KERNEL);
if (!fd_info) {
mutex_unlock(&msm_rotator_dev->rotator_lock);
pr_err("%s: insufficient memory to alloc resources\n",
__func__);
return -ENOMEM;
}
list_add(&fd_info->list, &msm_rotator_dev->fd_list);
fd_info->pid = current->pid;
}
fd_info->ref_cnt++;
mutex_unlock(&msm_rotator_dev->rotator_lock);
filp->private_data = fd_info;
return 0;
}
static int
msm_rotator_close(struct inode *inode, struct file *filp)
{
struct msm_rotator_fd_info *fd_info;
int s;
fd_info = (struct msm_rotator_fd_info *)filp->private_data;
mutex_lock(&msm_rotator_dev->rotator_lock);
if (--fd_info->ref_cnt > 0) {
mutex_unlock(&msm_rotator_dev->rotator_lock);
return 0;
}
for (s = 0; s < MAX_SESSIONS; s++) {
if (msm_rotator_dev->rot_session[s] != NULL &&
&(msm_rotator_dev->rot_session[s]->fd_info) == fd_info) {
pr_debug("%s: freeing rotator session %p (pid %d)\n",
__func__, msm_rotator_dev->rot_session[s],
fd_info->pid);
rot_wait_for_commit_queue(true);
msm_rotator_signal_timeline(s);
kfree(msm_rotator_dev->rot_session[s]);
msm_rotator_dev->rot_session[s] = NULL;
if (msm_rotator_dev->last_session_idx == s)
msm_rotator_dev->last_session_idx =
INVALID_SESSION;
}
}
list_del(&fd_info->list);
kfree(fd_info);
mutex_unlock(&msm_rotator_dev->rotator_lock);
return 0;
}
static long msm_rotator_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
struct msm_rotator_fd_info *fd_info;
if (_IOC_TYPE(cmd) != MSM_ROTATOR_IOCTL_MAGIC)
return -ENOTTY;
fd_info = (struct msm_rotator_fd_info *)file->private_data;
switch (cmd) {
case MSM_ROTATOR_IOCTL_START:
return msm_rotator_start(arg, fd_info);
case MSM_ROTATOR_IOCTL_ROTATE:
return msm_rotator_do_rotate(arg);
case MSM_ROTATOR_IOCTL_FINISH:
return msm_rotator_finish(arg);
case MSM_ROTATOR_IOCTL_BUFFER_SYNC:
return msm_rotator_buf_sync(arg);
default:
dev_dbg(msm_rotator_dev->device,
"unexpected IOCTL %d\n", cmd);
return -ENOTTY;
}
}
static const struct file_operations msm_rotator_fops = {
.owner = THIS_MODULE,
.open = msm_rotator_open,
.release = msm_rotator_close,
.unlocked_ioctl = msm_rotator_ioctl,
};
static int __devinit msm_rotator_probe(struct platform_device *pdev)
{
int rc = 0;
struct resource *res;
struct msm_rotator_platform_data *pdata = NULL;
int i, number_of_clks;
uint32_t ver;
msm_rotator_dev = kzalloc(sizeof(struct msm_rotator_dev), GFP_KERNEL);
if (!msm_rotator_dev) {
printk(KERN_ERR "%s Unable to allocate memory for struct\n",
__func__);
return -ENOMEM;
}
for (i = 0; i < MAX_SESSIONS; i++)
msm_rotator_dev->rot_session[i] = NULL;
msm_rotator_dev->last_session_idx = INVALID_SESSION;
pdata = pdev->dev.platform_data;
number_of_clks = pdata->number_of_clocks;
rot_iommu_split_domain = pdata->rot_iommu_split_domain;
msm_rotator_dev->imem_owner = IMEM_NO_OWNER;
mutex_init(&msm_rotator_dev->imem_lock);
INIT_LIST_HEAD(&msm_rotator_dev->fd_list);
msm_rotator_dev->imem_clk_state = CLK_DIS;
INIT_DELAYED_WORK(&msm_rotator_dev->imem_clk_work,
msm_rotator_imem_clk_work_f);
msm_rotator_dev->imem_clk = NULL;
msm_rotator_dev->pdev = pdev;
msm_rotator_dev->core_clk = NULL;
msm_rotator_dev->pclk = NULL;
mrd->y_rot_buf = kmalloc(sizeof(struct rot_buf_type), GFP_KERNEL);
mrd->chroma_rot_buf = kmalloc(sizeof(struct rot_buf_type), GFP_KERNEL);
mrd->chroma2_rot_buf = kmalloc(sizeof(struct rot_buf_type), GFP_KERNEL);
memset((void *)mrd->y_rot_buf, 0, sizeof(struct rot_buf_type));
memset((void *)mrd->chroma_rot_buf, 0, sizeof(struct rot_buf_type));
memset((void *)mrd->chroma2_rot_buf, 0, sizeof(struct rot_buf_type));
#ifdef CONFIG_MSM_BUS_SCALING
if (!msm_rotator_dev->bus_client_handle && pdata &&
pdata->bus_scale_table) {
msm_rotator_dev->bus_client_handle =
msm_bus_scale_register_client(
pdata->bus_scale_table);
if (!msm_rotator_dev->bus_client_handle) {
pr_err("%s not able to get bus scale handle\n",
__func__);
}
}
#endif
for (i = 0; i < number_of_clks; i++) {
if (pdata->rotator_clks[i].clk_type == ROTATOR_IMEM_CLK) {
msm_rotator_dev->imem_clk =
clk_get(&msm_rotator_dev->pdev->dev,
pdata->rotator_clks[i].clk_name);
if (IS_ERR(msm_rotator_dev->imem_clk)) {
rc = PTR_ERR(msm_rotator_dev->imem_clk);
msm_rotator_dev->imem_clk = NULL;
printk(KERN_ERR "%s: cannot get imem_clk "
"rc=%d\n", DRIVER_NAME, rc);
goto error_imem_clk;
}
if (pdata->rotator_clks[i].clk_rate)
clk_set_rate(msm_rotator_dev->imem_clk,
pdata->rotator_clks[i].clk_rate);
}
if (pdata->rotator_clks[i].clk_type == ROTATOR_PCLK) {
msm_rotator_dev->pclk =
clk_get(&msm_rotator_dev->pdev->dev,
pdata->rotator_clks[i].clk_name);
if (IS_ERR(msm_rotator_dev->pclk)) {
rc = PTR_ERR(msm_rotator_dev->pclk);
msm_rotator_dev->pclk = NULL;
printk(KERN_ERR "%s: cannot get pclk rc=%d\n",
DRIVER_NAME, rc);
goto error_pclk;
}
if (pdata->rotator_clks[i].clk_rate)
clk_set_rate(msm_rotator_dev->pclk,
pdata->rotator_clks[i].clk_rate);
}
if (pdata->rotator_clks[i].clk_type == ROTATOR_CORE_CLK) {
msm_rotator_dev->core_clk =
clk_get(&msm_rotator_dev->pdev->dev,
pdata->rotator_clks[i].clk_name);
if (IS_ERR(msm_rotator_dev->core_clk)) {
rc = PTR_ERR(msm_rotator_dev->core_clk);
msm_rotator_dev->core_clk = NULL;
printk(KERN_ERR "%s: cannot get core clk "
"rc=%d\n", DRIVER_NAME, rc);
goto error_core_clk;
}
if (pdata->rotator_clks[i].clk_rate)
clk_set_rate(msm_rotator_dev->core_clk,
pdata->rotator_clks[i].clk_rate);
}
}
msm_rotator_dev->regulator = regulator_get(&msm_rotator_dev->pdev->dev,
"vdd");
if (IS_ERR(msm_rotator_dev->regulator))
msm_rotator_dev->regulator = NULL;
msm_rotator_dev->rot_clk_state = CLK_DIS;
INIT_DELAYED_WORK(&msm_rotator_dev->rot_clk_work,
msm_rotator_rot_clk_work_f);
mutex_init(&msm_rotator_dev->rotator_lock);
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
msm_rotator_dev->client = msm_ion_client_create(-1, pdev->name);
#endif
platform_set_drvdata(pdev, msm_rotator_dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
printk(KERN_ALERT
"%s: could not get IORESOURCE_MEM\n", DRIVER_NAME);
rc = -ENODEV;
goto error_get_resource;
}
msm_rotator_dev->io_base = ioremap(res->start,
resource_size(res));
#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
if (msm_rotator_dev->imem_clk)
clk_prepare_enable(msm_rotator_dev->imem_clk);
#endif
enable_rot_clks();
ver = ioread32(MSM_ROTATOR_HW_VERSION);
disable_rot_clks();
#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
if (msm_rotator_dev->imem_clk)
clk_disable_unprepare(msm_rotator_dev->imem_clk);
#endif
if (ver != pdata->hardware_version_number)
pr_debug("%s: invalid HW version ver 0x%x\n",
DRIVER_NAME, ver);
rotator_hw_revision = ver;
rotator_hw_revision >>= 16; /* bit 31:16 */
rotator_hw_revision &= 0xff;
pr_info("%s: rotator_hw_revision=%x\n",
__func__, rotator_hw_revision);
msm_rotator_dev->irq = platform_get_irq(pdev, 0);
if (msm_rotator_dev->irq < 0) {
printk(KERN_ALERT "%s: could not get IORESOURCE_IRQ\n",
DRIVER_NAME);
rc = -ENODEV;
goto error_get_irq;
}
rc = request_irq(msm_rotator_dev->irq, msm_rotator_isr,
IRQF_TRIGGER_RISING, DRIVER_NAME, NULL);
if (rc) {
printk(KERN_ERR "%s: request_irq() failed\n", DRIVER_NAME);
goto error_get_irq;
}
/* we enable the IRQ when we need it in the ioctl */
disable_irq(msm_rotator_dev->irq);
rc = alloc_chrdev_region(&msm_rotator_dev->dev_num, 0, 1, DRIVER_NAME);
if (rc < 0) {
printk(KERN_ERR "%s: alloc_chrdev_region Failed rc = %d\n",
__func__, rc);
goto error_get_irq;
}
msm_rotator_dev->class = class_create(THIS_MODULE, DRIVER_NAME);
if (IS_ERR(msm_rotator_dev->class)) {
rc = PTR_ERR(msm_rotator_dev->class);
printk(KERN_ERR "%s: couldn't create class rc = %d\n",
DRIVER_NAME, rc);
goto error_class_create;
}
msm_rotator_dev->device = device_create(msm_rotator_dev->class, NULL,
msm_rotator_dev->dev_num, NULL,
DRIVER_NAME);
if (IS_ERR(msm_rotator_dev->device)) {
rc = PTR_ERR(msm_rotator_dev->device);
printk(KERN_ERR "%s: device_create failed %d\n",
DRIVER_NAME, rc);
goto error_class_device_create;
}
cdev_init(&msm_rotator_dev->cdev, &msm_rotator_fops);
rc = cdev_add(&msm_rotator_dev->cdev,
MKDEV(MAJOR(msm_rotator_dev->dev_num), 0),
1);
if (rc < 0) {
printk(KERN_ERR "%s: cdev_add failed %d\n", __func__, rc);
goto error_cdev_add;
}
init_waitqueue_head(&msm_rotator_dev->wq);
INIT_WORK(&msm_rotator_dev->commit_work, rot_commit_wq_handler);
init_completion(&msm_rotator_dev->commit_comp);
mutex_init(&msm_rotator_dev->commit_mutex);
mutex_init(&msm_rotator_dev->commit_wq_mutex);
atomic_set(&msm_rotator_dev->commit_q_w, 0);
atomic_set(&msm_rotator_dev->commit_q_r, 0);
atomic_set(&msm_rotator_dev->commit_q_cnt, 0);
dev_dbg(msm_rotator_dev->device, "probe successful\n");
return rc;
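	/*
	 * Error unwind: each label below releases the resources acquired
	 * before the failure, in the reverse order of their setup above.
	 */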
error_cdev_add:
device_destroy(msm_rotator_dev->class, msm_rotator_dev->dev_num);
error_class_device_create:
class_destroy(msm_rotator_dev->class);
error_class_create:
unregister_chrdev_region(msm_rotator_dev->dev_num, 1);
error_get_irq:
iounmap(msm_rotator_dev->io_base);
error_get_resource:
mutex_destroy(&msm_rotator_dev->rotator_lock);
if (msm_rotator_dev->regulator)
regulator_put(msm_rotator_dev->regulator);
clk_put(msm_rotator_dev->core_clk);
error_core_clk:
clk_put(msm_rotator_dev->pclk);
error_pclk:
if (msm_rotator_dev->imem_clk)
clk_put(msm_rotator_dev->imem_clk);
error_imem_clk:
mutex_destroy(&msm_rotator_dev->imem_lock);
kfree(msm_rotator_dev);
return rc;
}
static int __devexit msm_rotator_remove(struct platform_device *plat_dev)
{
int i;
rot_wait_for_commit_queue(true);
#ifdef CONFIG_MSM_BUS_SCALING
if (msm_rotator_dev->bus_client_handle) {
msm_bus_scale_unregister_client
(msm_rotator_dev->bus_client_handle);
msm_rotator_dev->bus_client_handle = 0;
}
#endif
free_irq(msm_rotator_dev->irq, NULL);
mutex_destroy(&msm_rotator_dev->rotator_lock);
cdev_del(&msm_rotator_dev->cdev);
device_destroy(msm_rotator_dev->class, msm_rotator_dev->dev_num);
class_destroy(msm_rotator_dev->class);
unregister_chrdev_region(msm_rotator_dev->dev_num, 1);
iounmap(msm_rotator_dev->io_base);
if (msm_rotator_dev->imem_clk) {
if (msm_rotator_dev->imem_clk_state == CLK_EN)
clk_disable_unprepare(msm_rotator_dev->imem_clk);
clk_put(msm_rotator_dev->imem_clk);
msm_rotator_dev->imem_clk = NULL;
}
if (msm_rotator_dev->rot_clk_state == CLK_EN)
disable_rot_clks();
clk_put(msm_rotator_dev->core_clk);
clk_put(msm_rotator_dev->pclk);
if (msm_rotator_dev->regulator)
regulator_put(msm_rotator_dev->regulator);
msm_rotator_dev->core_clk = NULL;
msm_rotator_dev->pclk = NULL;
mutex_destroy(&msm_rotator_dev->imem_lock);
for (i = 0; i < MAX_SESSIONS; i++)
if (msm_rotator_dev->rot_session[i] != NULL)
kfree(msm_rotator_dev->rot_session[i]);
kfree(msm_rotator_dev);
return 0;
}
#ifdef CONFIG_PM
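/*
 * System PM callbacks: suspend drains any queued rotator commits, moves the
 * imem/rotator clocks from CLK_EN to CLK_SUSPEND and releases the pending
 * timelines; resume restores the clock state that was recorded at suspend.
 */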
static int msm_rotator_suspend(struct platform_device *dev, pm_message_t state)
{
rot_wait_for_commit_queue(true);
mutex_lock(&msm_rotator_dev->imem_lock);
if (msm_rotator_dev->imem_clk_state == CLK_EN
&& msm_rotator_dev->imem_clk) {
clk_disable_unprepare(msm_rotator_dev->imem_clk);
msm_rotator_dev->imem_clk_state = CLK_SUSPEND;
}
mutex_unlock(&msm_rotator_dev->imem_lock);
mutex_lock(&msm_rotator_dev->rotator_lock);
if (msm_rotator_dev->rot_clk_state == CLK_EN) {
disable_rot_clks();
msm_rotator_dev->rot_clk_state = CLK_SUSPEND;
}
msm_rotator_release_all_timeline();
mutex_unlock(&msm_rotator_dev->rotator_lock);
return 0;
}
static int msm_rotator_resume(struct platform_device *dev)
{
mutex_lock(&msm_rotator_dev->imem_lock);
if (msm_rotator_dev->imem_clk_state == CLK_SUSPEND
&& msm_rotator_dev->imem_clk) {
clk_prepare_enable(msm_rotator_dev->imem_clk);
msm_rotator_dev->imem_clk_state = CLK_EN;
}
mutex_unlock(&msm_rotator_dev->imem_lock);
mutex_lock(&msm_rotator_dev->rotator_lock);
if (msm_rotator_dev->rot_clk_state == CLK_SUSPEND) {
enable_rot_clks();
msm_rotator_dev->rot_clk_state = CLK_EN;
}
mutex_unlock(&msm_rotator_dev->rotator_lock);
return 0;
}
#endif
static struct platform_driver msm_rotator_platform_driver = {
.probe = msm_rotator_probe,
.remove = __devexit_p(msm_rotator_remove),
#ifdef CONFIG_PM
.suspend = msm_rotator_suspend,
.resume = msm_rotator_resume,
#endif
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME
}
};
static int __init msm_rotator_init(void)
{
return platform_driver_register(&msm_rotator_platform_driver);
}
static void __exit msm_rotator_exit(void)
{
return platform_driver_unregister(&msm_rotator_platform_driver);
}
module_init(msm_rotator_init);
module_exit(msm_rotator_exit);
MODULE_DESCRIPTION("MSM Offline Image Rotator driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
/* ==== end of drivers/char/msm_rotator.c (AdrianoMartins/android_kernel_lge_v500, GPL-2.0) ==== */
/*
* f_mass_storage.c -- Mass Storage USB Composite Function
*
* Copyright (C) 2003-2008 Alan Stern
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz <mina86@mina86.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The names of the above-listed copyright holders may not be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* The Mass Storage Function acts as a USB Mass Storage device,
* appearing to the host as a disk drive or as a CD-ROM drive. In
* addition to providing an example of a genuinely useful composite
* function for a USB device, it also illustrates a technique of
* double-buffering for increased throughput.
*
* For more information about MSF and in particular its module
* parameters and sysfs interface read the
* <Documentation/usb/mass-storage.txt> file.
*/
/*
* MSF is configured by specifying a fsg_config structure. It has the
* following fields:
*
 *	nluns		Number of LUNs the function has (anywhere from 1
 *			to FSG_MAX_LUNS, which is 8).
* luns An array of LUN configuration values. This
 *			should be filled for each LUN that the
 *			function will include (i.e. for "nluns"
* LUNs). Each element of the array has
* the following fields:
* ->filename The path to the backing file for the LUN.
* Required if LUN is not marked as
* removable.
* ->ro Flag specifying access to the LUN shall be
* read-only. This is implied if CD-ROM
* emulation is enabled as well as when
* it was impossible to open "filename"
* in R/W mode.
* ->removable Flag specifying that LUN shall be indicated as
* being removable.
* ->cdrom Flag specifying that LUN shall be reported as
* being a CD-ROM.
* ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12)
* commands for this LUN shall be ignored.
*
* vendor_name
* product_name
* release Information used as a reply to INQUIRY
* request. To use default set to NULL,
* NULL, 0xffff respectively. The first
* field should be 8 and the second 16
* characters or less.
*
* can_stall Set to permit function to halt bulk endpoints.
* Disabled on some USB devices known not
* to work correctly. You should set it
* to true.
*
* If "removable" is not set for a LUN then a backing file must be
* specified. If it is set, then NULL filename means the LUN's medium
* is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error).  The CD-ROM emulation includes a single
* data track and no audio tracks; hence there need be only one
* backing file per LUN.
*
* This function is heavily based on "File-backed Storage Gadget" by
* Alan Stern which in turn is heavily based on "Gadget Zero" by David
* Brownell. The driver's SCSI command interface was based on the
* "Information technology - Small Computer System Interface - 2"
* document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
* available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
* The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
* was based on the "Universal Serial Bus Mass Storage Class UFI
* Command Specification" document, Revision 1.0, December 14, 1998,
* available at
* <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
*/
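/*
 * Illustrative only: a minimal sketch of how a gadget driver might fill in
 * a one-LUN fsg_config using the fields documented above.  The backing-file
 * path is a made-up placeholder and the exact field spellings should be
 * checked against struct fsg_config in f_mass_storage.h:
 *
 *	struct fsg_config cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nluns = 1;
 *	cfg.luns[0].filename  = "/data/backing.img";
 *	cfg.luns[0].ro        = 0;
 *	cfg.luns[0].removable = 1;
 *	cfg.luns[0].cdrom     = 0;
 *	cfg.luns[0].nofua     = 0;
 *	cfg.vendor_name  = NULL;	(NULL/NULL/0xffff select the defaults)
 *	cfg.product_name = NULL;
 *	cfg.release      = 0xffff;
 *	cfg.can_stall    = true;
 */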
/*
* Driver Design
*
* The MSF is fairly straightforward. There is a main kernel
* thread that handles most of the work. Interrupt routines field
* callbacks from the controller driver: bulk- and interrupt-request
* completion notifications, endpoint-0 events, and disconnect events.
* Completion events are passed to the main thread by wakeup calls. Many
* ep0 requests are handled at interrupt time, but SetInterface,
* SetConfiguration, and device reset requests are forwarded to the
* thread in the form of "exceptions" using SIGUSR1 signals (since they
* should interrupt any ongoing file I/O operations).
*
* The thread's main routine implements the standard command/data/status
* parts of a SCSI interaction. It and its subroutines are full of tests
* for pending signals/exceptions -- all this polling is necessary since
* the kernel has no setjmp/longjmp equivalents. (Maybe this is an
* indication that the driver really wants to be running in userspace.)
* An important point is that so long as the thread is alive it keeps an
* open reference to the backing file. This will prevent unmounting
* the backing file's underlying filesystem and could cause problems
* during system shutdown, for example. To prevent such problems, the
* thread catches INT, TERM, and KILL signals and converts them into
* an EXIT exception.
*
* In normal operation the main thread is started during the gadget's
* fsg_bind() callback and stopped during fsg_unbind(). But it can
* also exit when it receives a signal, and there's no point leaving
* the gadget running when the thread is dead. As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
*
* To provide maximum throughput, the driver uses a circular pipeline of
* buffer heads (struct fsg_buffhd). In principle the pipeline can be
* arbitrarily long; in practice the benefits don't justify having more
* than 2 stages (i.e., double buffering). But it helps to think of the
* pipeline as being a long one. Each buffer head contains a bulk-in and
* a bulk-out request pointer (since the buffer can be used for both
* output and input -- directions always are given from the host's
* point of view) as well as a pointer to the buffer and various state
* variables.
*
* Use of the pipeline follows a simple protocol. There is a variable
* (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
* At any time that buffer head may still be in use from an earlier
* request, so each buffer head has a state variable indicating whether
* it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
* buffer head to be EMPTY, filling the buffer either by file I/O or by
* USB I/O (during which the buffer head is BUSY), and marking the buffer
* head FULL when the I/O is complete. Then the buffer will be emptied
* (again possibly by USB I/O, during which it is marked BUSY) and
* finally marked EMPTY again (possibly by a completion routine).
*
* A module parameter tells the driver to avoid stalling the bulk
* endpoints wherever the transport specification allows. This is
* necessary for some UDCs like the SuperH, which cannot reliably clear a
* halt on a bulk endpoint. However, under certain circumstances the
* Bulk-only specification requires a stall. In such cases the driver
* will halt the endpoint and set a flag indicating that it should clear
* the halt in software during the next device reset. Hopefully this
* will permit everything to work correctly. Furthermore, although the
* specification allows the bulk-out endpoint to halt when the host sends
* too much data, implementing this would cause an unavoidable race.
* The driver will always use the "no-stall" approach for OUT transfers.
*
* One subtle point concerns sending status-stage responses for ep0
* requests. Some of these requests, such as device reset, can involve
* interrupting an ongoing file I/O operation, which might take an
* arbitrarily long time. During that delay the host might give up on
* the original ep0 request and issue a new one. When that happens the
* driver should not notify the host about completion of the original
* request, as the host will no longer be waiting for it. So the driver
* assigns to each ep0 request a unique tag, and it keeps track of the
* tag value of the request associated with a long-running exception
* (device-reset, interface-change, or configuration-change). When the
* exception handler is finished, the status-stage response is submitted
* only if the current ep0 request tag is equal to the exception request
* tag. Thus only the most recently received ep0 request will get a
* status-stage response.
*
* Warning: This driver source file is too long. It ought to be split up
* into a header file plus about 3 separate .c files, to handle the details
* of the Gadget, USB Mass Storage, and SCSI protocols.
*/
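/*
 * Sketch of the buffer-head cycle described above (pseudo-code, not literal
 * driver code; the names follow struct fsg_buffhd and the BUF_STATE_*
 * values used throughout this file):
 *
 *	bh = common->next_buffhd_to_fill;
 *	wait until bh->state == BUF_STATE_EMPTY;
 *	bh->state = BUF_STATE_BUSY;	start file I/O or USB I/O into bh
 *	bh->state = BUF_STATE_FULL;	once that I/O has completed
 *	common->next_buffhd_to_fill = bh->next;
 *	...the drain side then empties the buffer (BUSY again during USB I/O)
 *	...and finally marks it BUF_STATE_EMPTY for reuse
 */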
/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
#include "gadget_chips.h"
#include "configfs.h"
/*------------------------------------------------------------------------*/
#define FSG_DRIVER_DESC "Mass Storage Function"
#define FSG_DRIVER_VERSION "2009/09/11"
static const char fsg_string_interface[] = "Mass Storage";
#include "storage_common.h"
#include "f_mass_storage.h"
/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
static struct usb_string fsg_strings[] = {
{FSG_STRING_INTERFACE, fsg_string_interface},
{}
};
static struct usb_gadget_strings fsg_stringtab = {
.language = 0x0409, /* en-us */
.strings = fsg_strings,
};
static struct usb_gadget_strings *fsg_strings_array[] = {
&fsg_stringtab,
NULL,
};
/*-------------------------------------------------------------------------*/
/*
 * If a USB mass storage VFS operation is stuck for more than 10 sec,
 * the host will initiate a reset.  Configure the timer with 9 sec so the
 * error message is printed before the host initiates that reset.
*/
#define MSC_VFS_TIMER_PERIOD_MS 9000
static int msc_vfs_timer_period_ms = MSC_VFS_TIMER_PERIOD_MS;
module_param(msc_vfs_timer_period_ms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msc_vfs_timer_period_ms, "Set period for MSC VFS timer");
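/*
 * Because the parameter above is declared with S_IWUSR, root can normally
 * tune the timeout at runtime through the module parameter interface, e.g.
 * /sys/module/<module name>/parameters/msc_vfs_timer_period_ms; the exact
 * module name depends on how this function is built into the gadget.
 */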
static int write_error_after_csw_sent;
static int must_report_residue;
static int csw_sent;
struct fsg_dev;
struct fsg_common;
/* Data shared by all the FSG instances. */
struct fsg_common {
struct usb_gadget *gadget;
struct usb_composite_dev *cdev;
struct fsg_dev *fsg, *new_fsg;
wait_queue_head_t fsg_wait;
/* filesem protects: backing files in use */
struct rw_semaphore filesem;
/* lock protects: state, all the req_busy's */
spinlock_t lock;
struct usb_ep *ep0; /* Copy of gadget->ep0 */
struct usb_request *ep0req; /* Copy of cdev->req */
unsigned int ep0_req_tag;
struct fsg_buffhd *next_buffhd_to_fill;
struct fsg_buffhd *next_buffhd_to_drain;
struct fsg_buffhd *buffhds;
unsigned int fsg_num_buffers;
int cmnd_size;
u8 cmnd[MAX_COMMAND_SIZE];
unsigned int nluns;
unsigned int lun;
struct fsg_lun **luns;
struct fsg_lun *curlun;
unsigned int bulk_out_maxpacket;
enum fsg_state state; /* For exception handling */
unsigned int exception_req_tag;
enum data_direction data_dir;
u32 data_size;
u32 data_size_from_cmnd;
u32 tag;
u32 residue;
u32 usb_amount_left;
unsigned int can_stall:1;
unsigned int free_storage_on_release:1;
unsigned int phase_error:1;
unsigned int short_packet_received:1;
unsigned int bad_lun_okay:1;
unsigned int running:1;
unsigned int sysfs:1;
int thread_wakeup_needed;
struct completion thread_notifier;
struct task_struct *thread_task;
/* Callback functions. */
const struct fsg_operations *ops;
/* Gadget's private data. */
void *private_data;
char inquiry_string[INQUIRY_MAX_LEN];
/* LUN name for sysfs purpose */
char name[FSG_MAX_LUNS][LUN_NAME_LEN];
struct kref ref;
struct timer_list vfs_timer;
};
struct fsg_dev {
struct usb_function function;
struct usb_gadget *gadget; /* Copy of cdev->gadget */
struct fsg_common *common;
u16 interface_number;
unsigned int bulk_in_enabled:1;
unsigned int bulk_out_enabled:1;
unsigned long atomic_bitflags;
#define IGNORE_BULK_OUT 0
struct usb_ep *bulk_in;
struct usb_ep *bulk_out;
};
static void msc_usb_vfs_timer_func(unsigned long data)
{
struct fsg_common *common = (struct fsg_common *) data;
switch (common->data_dir) {
case DATA_DIR_FROM_HOST:
dev_err(&common->curlun->dev,
"usb mass storage stuck in vfs_write\n");
break;
case DATA_DIR_TO_HOST:
dev_err(&common->curlun->dev,
"usb mass storage stuck in vfs_read\n");
break;
default:
dev_err(&common->curlun->dev,
"usb mass storage stuck in vfs_sync\n");
break;
}
}
static inline int __fsg_is_set(struct fsg_common *common,
const char *func, unsigned line)
{
if (common->fsg)
return 1;
ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
WARN_ON(1);
return 0;
}
#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
return container_of(f, struct fsg_dev, function);
}
typedef void (*fsg_routine_t)(struct fsg_dev *);
static int send_status(struct fsg_common *common);
static int exception_in_progress(struct fsg_common *common)
{
return common->state > FSG_STATE_IDLE;
}
/* Make bulk-out requests be divisible by the maxpacket size */
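/*
 * Example: with a 512-byte bulk-out maxpacket, a requested length of 1000
 * is padded up to 1024 for the USB request, while bulk_out_intended_length
 * stays 1000 so the completion path can still detect a short transfer.
 */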
static void set_bulk_out_req_length(struct fsg_common *common,
struct fsg_buffhd *bh, unsigned int length)
{
unsigned int rem;
bh->bulk_out_intended_length = length;
rem = length % common->bulk_out_maxpacket;
if (rem > 0)
length += common->bulk_out_maxpacket - rem;
bh->outreq->length = length;
}
/*-------------------------------------------------------------------------*/
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
const char *name;
if (ep == fsg->bulk_in)
name = "bulk-in";
else if (ep == fsg->bulk_out)
name = "bulk-out";
else
name = ep->name;
DBG(fsg, "%s set halt\n", name);
return usb_ep_set_halt(ep);
}
/*-------------------------------------------------------------------------*/
/* These routines may be called in process context or in_irq */
/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
smp_wmb(); /* ensure the write of bh->state is complete */
/* Tell the main thread that something has happened */
common->thread_wakeup_needed = 1;
if (common->thread_task)
wake_up_process(common->thread_task);
}
static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
unsigned long flags;
/*
* Do nothing if a higher-priority exception is already in progress.
* If a lower-or-equal priority exception is in progress, preempt it
* and notify the main thread by sending it a signal.
*/
spin_lock_irqsave(&common->lock, flags);
if (common->state <= new_state) {
common->exception_req_tag = common->ep0_req_tag;
common->state = new_state;
if (common->thread_task)
send_sig_info(SIGUSR1, SEND_SIG_FORCED,
common->thread_task);
}
spin_unlock_irqrestore(&common->lock, flags);
}
/*-------------------------------------------------------------------------*/
static int ep0_queue(struct fsg_common *common)
{
int rc;
rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
common->ep0->driver_data = common;
if (rc != 0 && rc != -ESHUTDOWN) {
/* We can't do much more than wait for a reset */
WARNING(common, "error in submission: %s --> %d\n",
common->ep0->name, rc);
}
return rc;
}
/*-------------------------------------------------------------------------*/
/* Completion handlers. These always run in_irq. */
static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
struct fsg_common *common = ep->driver_data;
struct fsg_buffhd *bh = req->context;
if (req->status || req->actual != req->length)
pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, req->length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
/*
* Disconnect and completion might race each other and driver data
* is set to NULL during ep disable. So, add a check if that is case.
*/
if (!common) {
bh->inreq_busy = 0;
bh->state = BUF_STATE_EMPTY;
return;
}
spin_lock(&common->lock);
bh->inreq_busy = 0;
bh->state = BUF_STATE_EMPTY;
wakeup_thread(common);
spin_unlock(&common->lock);
}
static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
struct fsg_common *common = ep->driver_data;
struct fsg_buffhd *bh = req->context;
dump_msg(common, "bulk-out", req->buf, req->actual);
if (req->status || req->actual != bh->bulk_out_intended_length)
DBG(common, "%s --> %d, %u/%u\n", __func__,
req->status, req->actual, bh->bulk_out_intended_length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
spin_lock(&common->lock);
bh->outreq_busy = 0;
bh->state = BUF_STATE_FULL;
wakeup_thread(common);
spin_unlock(&common->lock);
}
static int fsg_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct fsg_dev *fsg = fsg_from_func(f);
struct usb_request *req = fsg->common->ep0req;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
if (!fsg_is_set(fsg->common))
return -EOPNOTSUPP;
++fsg->common->ep0_req_tag; /* Record arrival of a new request */
req->context = NULL;
req->length = 0;
dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
switch (ctrl->bRequest) {
case US_BULK_RESET_REQUEST:
if (ctrl->bRequestType !=
(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
if (w_index != fsg->interface_number || w_value != 0 ||
w_length != 0)
return -EDOM;
/*
* Raise an exception to stop the current operation
* and reinitialize our state.
*/
DBG(fsg, "bulk reset request\n");
raise_exception(fsg->common, FSG_STATE_RESET);
return USB_GADGET_DELAYED_STATUS;
case US_BULK_GET_MAX_LUN:
if (ctrl->bRequestType !=
(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
if (w_index != fsg->interface_number || w_value != 0 ||
w_length != 1)
return -EDOM;
VDBG(fsg, "get max LUN\n");
*(u8 *)req->buf = fsg->common->nluns - 1;
/* Respond with data/status */
req->length = min((u16)1, w_length);
return ep0_queue(fsg->common);
}
VDBG(fsg,
"unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
ctrl->bRequestType, ctrl->bRequest,
le16_to_cpu(ctrl->wValue), w_index, w_length);
return -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/
/* All the following routines run in process context */
/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
struct usb_request *req, int *pbusy,
enum fsg_buffer_state *state)
{
int rc;
if (ep == fsg->bulk_in)
dump_msg(fsg, "bulk-in", req->buf, req->length);
spin_lock_irq(&fsg->common->lock);
*pbusy = 1;
*state = BUF_STATE_BUSY;
spin_unlock_irq(&fsg->common->lock);
rc = usb_ep_queue(ep, req, GFP_KERNEL);
if (rc == 0)
return; /* All good, we're done */
*pbusy = 0;
*state = BUF_STATE_EMPTY;
/* We can't do much more than wait for a reset */
/*
* Note: currently the net2280 driver fails zero-length
* submissions if DMA is enabled.
*/
if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && req->length == 0))
WARNING(fsg, "error in submission: %s --> %d\n", ep->name, rc);
}
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
if (!fsg_is_set(common))
return false;
start_transfer(common->fsg, common->fsg->bulk_in,
bh->inreq, &bh->inreq_busy, &bh->state);
return true;
}
static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
if (!fsg_is_set(common))
return false;
start_transfer(common->fsg, common->fsg->bulk_out,
bh->outreq, &bh->outreq_busy, &bh->state);
return true;
}
static int sleep_thread(struct fsg_common *common, bool can_freeze)
{
int rc = 0;
/* Wait until a signal arrives or we are woken up */
for (;;) {
if (can_freeze)
try_to_freeze();
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
rc = -EINTR;
break;
}
spin_lock_irq(&common->lock);
if (common->thread_wakeup_needed) {
spin_unlock_irq(&common->lock);
break;
}
spin_unlock_irq(&common->lock);
schedule();
}
__set_current_state(TASK_RUNNING);
spin_lock_irq(&common->lock);
common->thread_wakeup_needed = 0;
spin_unlock_irq(&common->lock);
smp_rmb(); /* ensure the latest bh->state is visible */
return rc;
}
/*-------------------------------------------------------------------------*/
static int do_read(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
u32 lba;
struct fsg_buffhd *bh;
int rc;
u32 amount_left;
loff_t file_offset, file_offset_tmp;
unsigned int amount;
ssize_t nread;
ktime_t start, diff;
/*
* Get the starting Logical Block Address and check that it's
* not too big.
*/
if (common->cmnd[0] == READ_6)
lba = get_unaligned_be24(&common->cmnd[1]);
else {
lba = get_unaligned_be32(&common->cmnd[2]);
/*
* We allow DPO (Disable Page Out = don't save data in the
* cache) and FUA (Force Unit Access = don't read from the
* cache), but we don't implement them.
*/
if ((common->cmnd[1] & ~0x18) != 0) {
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
}
if (lba >= curlun->num_sectors) {
curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
return -EINVAL;
}
file_offset = ((loff_t) lba) << curlun->blkbits;
/* Carry out the file reads */
amount_left = common->data_size_from_cmnd;
if (unlikely(amount_left == 0))
return -EIO; /* No default reply */
for (;;) {
/*
* Figure out how much we need to read:
* Try to read the remaining amount.
* But don't read more than the buffer size.
* And don't try to read past the end of the file.
*/
amount = min(amount_left, FSG_BUFLEN);
amount = min((loff_t)amount,
curlun->file_length - file_offset);
/* Wait for the next buffer to become available */
spin_lock_irq(&common->lock);
bh = common->next_buffhd_to_fill;
while (bh->state != BUF_STATE_EMPTY) {
spin_unlock_irq(&common->lock);
rc = sleep_thread(common, false);
if (rc)
return rc;
spin_lock_irq(&common->lock);
}
spin_unlock_irq(&common->lock);
/*
* If we were asked to read past the end of file,
* end with an empty buffer.
*/
if (amount == 0) {
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
curlun->sense_data_info =
file_offset >> curlun->blkbits;
curlun->info_valid = 1;
spin_lock_irq(&common->lock);
bh->inreq->length = 0;
bh->state = BUF_STATE_FULL;
spin_unlock_irq(&common->lock);
break;
}
/* Perform the read */
file_offset_tmp = file_offset;
start = ktime_get();
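		/*
		 * Arm the VFS watchdog so that a stuck vfs_read() is reported
		 * by msc_usb_vfs_timer_func() if it runs longer than the
		 * configured period; it is disarmed right after the read.
		 */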
mod_timer(&common->vfs_timer, jiffies +
msecs_to_jiffies(msc_vfs_timer_period_ms));
nread = vfs_read(curlun->filp,
(char __user *)bh->buf,
amount, &file_offset_tmp);
del_timer_sync(&common->vfs_timer);
VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
(unsigned long long)file_offset, (int)nread);
diff = ktime_sub(ktime_get(), start);
curlun->perf.rbytes += nread;
curlun->perf.rtime = ktime_add(curlun->perf.rtime, diff);
if (signal_pending(current))
return -EINTR;
if (nread < 0) {
LDBG(curlun, "error in file read: %d\n", (int)nread);
nread = 0;
} else if (nread < amount) {
LDBG(curlun, "partial file read: %d/%u\n",
(int)nread, amount);
nread = round_down(nread, curlun->blksize);
}
file_offset += nread;
amount_left -= nread;
common->residue -= nread;
/*
* Except at the end of the transfer, nread will be
* equal to the buffer size, which is divisible by the
* bulk-in maxpacket size.
*/
spin_lock_irq(&common->lock);
bh->inreq->length = nread;
bh->state = BUF_STATE_FULL;
spin_unlock_irq(&common->lock);
/* If an error occurred, report it and its position */
if (nread < amount) {
curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
curlun->sense_data_info =
file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
if (amount_left == 0)
break; /* No more left to read */
/* Send this buffer and go read some more */
bh->inreq->zero = 0;
if (!start_in_transfer(common, bh))
/* Don't know what to do if common->fsg is NULL */
return -EIO;
common->next_buffhd_to_fill = bh->next;
}
return -EIO; /* No default reply */
}
/*-------------------------------------------------------------------------*/
static int do_write(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
u32 lba;
struct fsg_buffhd *bh;
int get_some_more;
u32 amount_left_to_req, amount_left_to_write;
loff_t usb_offset, file_offset, file_offset_tmp;
unsigned int amount;
ssize_t nwritten;
ktime_t start, diff;
int rc, i;
if (curlun->ro) {
curlun->sense_data = SS_WRITE_PROTECTED;
return -EINVAL;
}
spin_lock(&curlun->filp->f_lock);
curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
spin_unlock(&curlun->filp->f_lock);
/*
* Get the starting Logical Block Address and check that it's
* not too big
*/
if (common->cmnd[0] == WRITE_6)
lba = get_unaligned_be24(&common->cmnd[1]);
else {
lba = get_unaligned_be32(&common->cmnd[2]);
/*
* We allow DPO (Disable Page Out = don't save data in the
* cache) and FUA (Force Unit Access = write directly to the
* medium). We don't implement DPO; we implement FUA by
* performing synchronous output.
*/
if (common->cmnd[1] & ~0x18) {
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
spin_lock(&curlun->filp->f_lock);
curlun->filp->f_flags |= O_SYNC;
spin_unlock(&curlun->filp->f_lock);
}
}
if (lba >= curlun->num_sectors) {
curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
return -EINVAL;
}
/* Carry out the file writes */
get_some_more = 1;
file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
amount_left_to_req = common->data_size_from_cmnd;
amount_left_to_write = common->data_size_from_cmnd;
while (amount_left_to_write > 0) {
/* Queue a request for more data from the host */
bh = common->next_buffhd_to_fill;
if (bh->state == BUF_STATE_EMPTY && get_some_more) {
/*
* Figure out how much we want to get:
* Try to get the remaining amount,
* but not more than the buffer size.
*/
amount = min(amount_left_to_req, FSG_BUFLEN);
/* Beyond the end of the backing file? */
if (usb_offset >= curlun->file_length) {
get_some_more = 0;
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
curlun->sense_data_info =
usb_offset >> curlun->blkbits;
curlun->info_valid = 1;
continue;
}
/* Get the next buffer */
usb_offset += amount;
common->usb_amount_left -= amount;
amount_left_to_req -= amount;
if (amount_left_to_req == 0)
get_some_more = 0;
/*
* Except at the end of the transfer, amount will be
* equal to the buffer size, which is divisible by
* the bulk-out maxpacket size.
*/
set_bulk_out_req_length(common, bh, amount);
if (!start_out_transfer(common, bh))
/* Dunno what to do if common->fsg is NULL */
return -EIO;
common->next_buffhd_to_fill = bh->next;
continue;
}
/* Write the received data to the backing file */
bh = common->next_buffhd_to_drain;
if (bh->state == BUF_STATE_EMPTY && !get_some_more)
break; /* We stopped early */
/*
		 * If the CSW packet has already been submitted to the hardware
		 * (the buffer state was marked full), then checking the residue
		 * here ensures that the CSW packet itself is not written to the
		 * storage medium.
*/
if (bh->state == BUF_STATE_FULL && common->residue) {
smp_rmb();
common->next_buffhd_to_drain = bh->next;
bh->state = BUF_STATE_EMPTY;
/* Did something go wrong with the transfer? */
if (bh->outreq->status != 0) {
curlun->sense_data = SS_COMMUNICATION_FAILURE;
curlun->sense_data_info =
file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
amount = bh->outreq->actual;
if (curlun->file_length - file_offset < amount) {
LERROR(curlun,
"write %u @ %llu beyond end %llu\n",
amount, (unsigned long long)file_offset,
(unsigned long long)curlun->file_length);
amount = curlun->file_length - file_offset;
}
/* Don't accept excess data. The spec doesn't say
* what to do in this case. We'll ignore the error.
*/
amount = min(amount, bh->bulk_out_intended_length);
/* Don't write a partial block */
amount = round_down(amount, curlun->blksize);
if (amount == 0)
goto empty_write;
/* Perform the write */
file_offset_tmp = file_offset;
start = ktime_get();
mod_timer(&common->vfs_timer, jiffies +
msecs_to_jiffies(msc_vfs_timer_period_ms));
nwritten = vfs_write(curlun->filp,
(char __user *)bh->buf,
amount, &file_offset_tmp);
del_timer_sync(&common->vfs_timer);
VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
(unsigned long long)file_offset, (int)nwritten);
diff = ktime_sub(ktime_get(), start);
curlun->perf.wbytes += nwritten;
curlun->perf.wtime =
ktime_add(curlun->perf.wtime, diff);
if (signal_pending(current))
return -EINTR; /* Interrupted! */
if (nwritten < 0) {
LDBG(curlun, "error in file write: %d\n",
(int)nwritten);
nwritten = 0;
} else if (nwritten < amount) {
LDBG(curlun, "partial file write: %d/%u\n",
(int)nwritten, amount);
nwritten = round_down(nwritten, curlun->blksize);
}
file_offset += nwritten;
amount_left_to_write -= nwritten;
common->residue -= nwritten;
/* If an error occurred, report it and its position */
if (nwritten < amount) {
curlun->sense_data = SS_WRITE_ERROR;
curlun->sense_data_info =
file_offset >> curlun->blkbits;
curlun->info_valid = 1;
write_error_after_csw_sent = 1;
goto write_error;
}
write_error:
if ((nwritten == amount) && !csw_sent) {
if (write_error_after_csw_sent)
break;
/*
				 * If a residue remains and there is nothing
				 * left to write, the device must still report
				 * the correct residue to the host.
*/
if (!amount_left_to_write && common->residue) {
must_report_residue = 1;
break;
}
/*
				 * Check whether any buffer is still in the
				 * BUSY state; a busy buffer means the host
				 * has not yet sent all of the data, so there
				 * is no point in sending the CSW before the
				 * complete data has been received.
*/
for (i = 0; i < common->fsg_num_buffers; i++) {
if (common->buffhds[i].state ==
BUF_STATE_BUSY)
break;
}
if (!amount_left_to_req &&
i == common->fsg_num_buffers) {
csw_sent = 1;
send_status(common);
}
}
empty_write:
/* Did the host decide to stop early? */
if (bh->outreq->actual < bh->bulk_out_intended_length) {
common->short_packet_received = 1;
break;
}
continue;
}
/* Wait for something to happen */
rc = sleep_thread(common, false);
if (rc)
return rc;
}
return -EIO; /* No default reply */
}
/*-------------------------------------------------------------------------*/
static int do_synchronize_cache(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
int rc;
/* We ignore the requested LBA and write out all file's
* dirty data buffers. */
mod_timer(&common->vfs_timer, jiffies +
msecs_to_jiffies(msc_vfs_timer_period_ms));
rc = fsg_lun_fsync_sub(curlun);
if (rc)
curlun->sense_data = SS_WRITE_ERROR;
del_timer_sync(&common->vfs_timer);
return 0;
}
/*-------------------------------------------------------------------------*/
static void invalidate_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
struct inode *inode = file_inode(filp);
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
static int do_verify(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
u32 lba;
u32 verification_length;
struct fsg_buffhd *bh = common->next_buffhd_to_fill;
loff_t file_offset, file_offset_tmp;
u32 amount_left;
unsigned int amount;
ssize_t nread;
/*
* Get the starting Logical Block Address and check that it's
* not too big.
*/
lba = get_unaligned_be32(&common->cmnd[2]);
if (lba >= curlun->num_sectors) {
curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
return -EINVAL;
}
/*
* We allow DPO (Disable Page Out = don't save data in the
* cache) but we don't implement it.
*/
if (common->cmnd[1] & ~0x10) {
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
verification_length = get_unaligned_be16(&common->cmnd[7]);
if (unlikely(verification_length == 0))
return -EIO; /* No default reply */
/* Prepare to carry out the file verify */
amount_left = verification_length << curlun->blkbits;
file_offset = ((loff_t) lba) << curlun->blkbits;
/* Write out all the dirty buffers before invalidating them */
mod_timer(&common->vfs_timer, jiffies +
msecs_to_jiffies(msc_vfs_timer_period_ms));
fsg_lun_fsync_sub(curlun);
del_timer_sync(&common->vfs_timer);
if (signal_pending(current))
return -EINTR;
invalidate_sub(curlun);
if (signal_pending(current))
return -EINTR;
/* Just try to read the requested blocks */
while (amount_left > 0) {
/*
* Figure out how much we need to read:
* Try to read the remaining amount, but not more than
* the buffer size.
* And don't try to read past the end of the file.
*/
amount = min(amount_left, FSG_BUFLEN);
amount = min((loff_t)amount,
curlun->file_length - file_offset);
if (amount == 0) {
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
curlun->sense_data_info =
file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
/* Perform the read */
file_offset_tmp = file_offset;
mod_timer(&common->vfs_timer, jiffies +
msecs_to_jiffies(msc_vfs_timer_period_ms));
nread = vfs_read(curlun->filp,
(char __user *) bh->buf,
amount, &file_offset_tmp);
del_timer_sync(&common->vfs_timer);
VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
(unsigned long long) file_offset,
(int) nread);
if (signal_pending(current))
return -EINTR;
if (nread < 0) {
LDBG(curlun, "error in file verify: %d\n", (int)nread);
nread = 0;
} else if (nread < amount) {
LDBG(curlun, "partial file verify: %d/%u\n",
(int)nread, amount);
nread = round_down(nread, curlun->blksize);
}
if (nread == 0) {
curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
curlun->sense_data_info =
file_offset >> curlun->blkbits;
curlun->info_valid = 1;
break;
}
file_offset += nread;
amount_left -= nread;
}
return 0;
}
/*-------------------------------------------------------------------------*/
static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
u8 *buf = (u8 *) bh->buf;
if (!curlun) { /* Unsupported LUNs are okay */
common->bad_lun_okay = 1;
memset(buf, 0, 36);
buf[0] = 0x7f; /* Unsupported, no device-type */
buf[4] = 31; /* Additional length */
return 36;
}
buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
buf[1] = curlun->removable ? 0x80 : 0;
buf[2] = 2; /* ANSI SCSI level 2 */
buf[3] = 2; /* SCSI-2 INQUIRY data format */
buf[4] = 31; /* Additional length */
buf[5] = 0; /* No special options */
buf[6] = 0;
buf[7] = 0;
memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
return 36;
}
static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
u8 *buf = (u8 *) bh->buf;
u32 sd, sdinfo;
int valid;
/*
* From the SCSI-2 spec., section 7.9 (Unit attention condition):
*
* If a REQUEST SENSE command is received from an initiator
* with a pending unit attention condition (before the target
* generates the contingent allegiance condition), then the
* target shall either:
* a) report any pending sense data and preserve the unit
* attention condition on the logical unit, or,
* b) report the unit attention condition, may discard any
* pending sense data, and clear the unit attention
* condition on the logical unit for that initiator.
*
* FSG normally uses option a); enable this code to use option b).
*/
#if 0
if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
curlun->sense_data = curlun->unit_attention_data;
curlun->unit_attention_data = SS_NO_SENSE;
}
#endif
if (!curlun) { /* Unsupported LUNs are okay */
common->bad_lun_okay = 1;
sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
sdinfo = 0;
valid = 0;
} else {
sd = curlun->sense_data;
sdinfo = curlun->sense_data_info;
valid = curlun->info_valid << 7;
curlun->sense_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
memset(buf, 0, 18);
buf[0] = valid | 0x70; /* Valid, current error */
buf[2] = SK(sd);
put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
buf[7] = 18 - 8; /* Additional sense length */
buf[12] = ASC(sd);
buf[13] = ASCQ(sd);
return 18;
}
static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
u32 lba = get_unaligned_be32(&common->cmnd[2]);
int pmi = common->cmnd[8];
u8 *buf = (u8 *)bh->buf;
/* Check the PMI and LBA fields */
if (pmi > 1 || (pmi == 0 && lba != 0)) {
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
/* Max logical block */
put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
return 8;
}
static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
int msf = common->cmnd[1] & 0x02;
u32 lba = get_unaligned_be32(&common->cmnd[2]);
u8 *buf = (u8 *)bh->buf;
if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
if (lba >= curlun->num_sectors) {
curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
return -EINVAL;
}
memset(buf, 0, 8);
buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
store_cdrom_address(&buf[4], msf, lba);
return 8;
}
static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
int msf = common->cmnd[1] & 0x02;
int start_track = common->cmnd[6];
u8 *buf = (u8 *)bh->buf;
if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
start_track > 1) {
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
memset(buf, 0, 20);
buf[1] = (20-2); /* TOC data length */
buf[2] = 1; /* First track number */
buf[3] = 1; /* Last track number */
buf[5] = 0x16; /* Data track, copying allowed */
buf[6] = 0x01; /* Only track is number 1 */
store_cdrom_address(&buf[8], msf, 0);
buf[13] = 0x16; /* Lead-out track is data */
buf[14] = 0xAA; /* Lead-out track number */
store_cdrom_address(&buf[16], msf, curlun->num_sectors);
return 20;
}
static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
int mscmnd = common->cmnd[0];
u8 *buf = (u8 *) bh->buf;
u8 *buf0 = buf;
int pc, page_code;
int changeable_values, all_pages;
int valid_page = 0;
int len, limit;
if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
pc = common->cmnd[2] >> 6;
page_code = common->cmnd[2] & 0x3f;
if (pc == 3) {
curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
return -EINVAL;
}
changeable_values = (pc == 1);
all_pages = (page_code == 0x3f);
/*
* Write the mode parameter header. Fixed values are: default
* medium type, no cache control (DPOFUA), and no block descriptors.
* The only variable value is the WriteProtect bit. We will fill in
* the mode data length later.
*/
memset(buf, 0, 8);
if (mscmnd == MODE_SENSE) {
buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
buf += 4;
limit = 255;
} else { /* MODE_SENSE_10 */
buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
buf += 8;
limit = 65535; /* Should really be FSG_BUFLEN */
}
/* No block descriptors */
/*
* The mode pages, in numerical order. The only page we support
* is the Caching page.
*/
if (page_code == 0x08 || all_pages) {
valid_page = 1;
buf[0] = 0x08; /* Page code */
buf[1] = 10; /* Page length */
memset(buf+2, 0, 10); /* None of the fields are changeable */
if (!changeable_values) {
buf[2] = 0x04; /* Write cache enable, */
/* Read cache not disabled */
/* No cache retention priorities */
put_unaligned_be16(0xffff, &buf[4]);
/* Don't disable prefetch */
/* Minimum prefetch = 0 */
put_unaligned_be16(0xffff, &buf[8]);
/* Maximum prefetch */
put_unaligned_be16(0xffff, &buf[10]);
/* Maximum prefetch ceiling */
}
buf += 12;
}
/*
* Check that a valid page was requested and the mode data length
* isn't too long.
*/
len = buf - buf0;
if (!valid_page || len > limit) {
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
/* Store the mode data length */
if (mscmnd == MODE_SENSE)
buf0[0] = len - 1;
else
put_unaligned_be16(len - 2, buf0);
return len;
}
static int do_start_stop(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
int loej, start;
if (!curlun) {
return -EINVAL;
} else if (!curlun->removable) {
curlun->sense_data = SS_INVALID_COMMAND;
return -EINVAL;
} else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
(common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
loej = common->cmnd[4] & 0x02;
start = common->cmnd[4] & 0x01;
/*
* Our emulation doesn't support mounting; the medium is
* available for use as soon as it is loaded.
*/
if (start) {
if (!fsg_lun_is_open(curlun)) {
curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
return -EINVAL;
}
return 0;
}
/* Are we allowed to unload the media? */
if (curlun->prevent_medium_removal) {
LDBG(curlun, "unload attempt prevented\n");
curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
return -EINVAL;
}
if (!loej)
return 0;
up_read(&common->filesem);
down_write(&common->filesem);
fsg_lun_close(curlun);
up_write(&common->filesem);
down_read(&common->filesem);
return 0;
}
static int do_prevent_allow(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
int prevent;
if (!common->curlun) {
return -EINVAL;
} else if (!common->curlun->removable) {
common->curlun->sense_data = SS_INVALID_COMMAND;
return -EINVAL;
}
prevent = common->cmnd[4] & 0x01;
if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
if (!curlun->nofua && curlun->prevent_medium_removal && !prevent) {
mod_timer(&common->vfs_timer, jiffies +
msecs_to_jiffies(msc_vfs_timer_period_ms));
fsg_lun_fsync_sub(curlun);
del_timer_sync(&common->vfs_timer);
}
curlun->prevent_medium_removal = prevent;
return 0;
}
static int do_read_format_capacities(struct fsg_common *common,
struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
u8 *buf = (u8 *) bh->buf;
buf[0] = buf[1] = buf[2] = 0;
buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
buf += 4;
put_unaligned_be32(curlun->num_sectors, &buf[0]);
/* Number of blocks */
put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
buf[4] = 0x02; /* Current capacity */
return 12;
}
static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
/* We don't support MODE SELECT */
if (curlun)
curlun->sense_data = SS_INVALID_COMMAND;
return -EINVAL;
}
/*-------------------------------------------------------------------------*/
static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
int rc;
rc = fsg_set_halt(fsg, fsg->bulk_in);
if (rc == -EAGAIN)
VDBG(fsg, "delayed bulk-in endpoint halt\n");
while (rc != 0) {
if (rc != -EAGAIN) {
WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
rc = 0;
break;
}
/* Wait for a short time and then try again */
if (msleep_interruptible(100) != 0)
return -EINTR;
rc = usb_ep_set_halt(fsg->bulk_in);
}
return rc;
}
static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
int rc;
DBG(fsg, "bulk-in set wedge\n");
rc = usb_ep_set_wedge(fsg->bulk_in);
if (rc == -EAGAIN)
VDBG(fsg, "delayed bulk-in endpoint wedge\n");
while (rc != 0) {
if (rc != -EAGAIN) {
WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
rc = 0;
break;
}
/* Wait for a short time and then try again */
if (msleep_interruptible(100) != 0)
return -EINTR;
rc = usb_ep_set_wedge(fsg->bulk_in);
}
return rc;
}
static int throw_away_data(struct fsg_common *common)
{
struct fsg_buffhd *bh;
u32 amount;
int rc;
for (bh = common->next_buffhd_to_drain;
bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
bh = common->next_buffhd_to_drain) {
/* Throw away the data in a filled buffer */
if (bh->state == BUF_STATE_FULL) {
smp_rmb();
bh->state = BUF_STATE_EMPTY;
common->next_buffhd_to_drain = bh->next;
/* A short packet or an error ends everything */
if (bh->outreq->actual < bh->bulk_out_intended_length ||
bh->outreq->status != 0) {
raise_exception(common,
FSG_STATE_ABORT_BULK_OUT);
return -EINTR;
}
continue;
}
/* Try to submit another request if we need one */
bh = common->next_buffhd_to_fill;
if (bh->state == BUF_STATE_EMPTY
&& common->usb_amount_left > 0) {
amount = min(common->usb_amount_left, FSG_BUFLEN);
/*
* Except at the end of the transfer, amount will be
* equal to the buffer size, which is divisible by
* the bulk-out maxpacket size.
*/
set_bulk_out_req_length(common, bh, amount);
if (!start_out_transfer(common, bh))
/* Dunno what to do if common->fsg is NULL */
return -EIO;
common->next_buffhd_to_fill = bh->next;
common->usb_amount_left -= amount;
continue;
}
/* Otherwise wait for something to happen */
rc = sleep_thread(common, true);
if (rc)
return rc;
}
return 0;
}
static int finish_reply(struct fsg_common *common)
{
struct fsg_buffhd *bh = common->next_buffhd_to_fill;
int rc = 0;
switch (common->data_dir) {
case DATA_DIR_NONE:
break; /* Nothing to send */
/*
* If we don't know whether the host wants to read or write,
* this must be CB or CBI with an unknown command. We mustn't
* try to send or receive any data. So stall both bulk pipes
* if we can and wait for a reset.
*/
case DATA_DIR_UNKNOWN:
if (!common->can_stall) {
/* Nothing */
} else if (fsg_is_set(common)) {
fsg_set_halt(common->fsg, common->fsg->bulk_out);
rc = halt_bulk_in_endpoint(common->fsg);
} else {
/* Don't know what to do if common->fsg is NULL */
rc = -EIO;
}
break;
/* All but the last buffer of data must have already been sent */
case DATA_DIR_TO_HOST:
if (common->data_size == 0) {
/* Nothing to send */
/* Don't know what to do if common->fsg is NULL */
} else if (!fsg_is_set(common)) {
rc = -EIO;
/* If there's no residue, simply send the last buffer */
} else if (common->residue == 0) {
bh->inreq->zero = 0;
if (!start_in_transfer(common, bh))
return -EIO;
common->next_buffhd_to_fill = bh->next;
/*
* For Bulk-only, mark the end of the data with a short
* packet. If we are allowed to stall, halt the bulk-in
* endpoint. (Note: This violates the Bulk-Only Transport
* specification, which requires us to pad the data if we
* don't halt the endpoint. Presumably nobody will mind.)
*/
} else {
bh->inreq->zero = 1;
if (!start_in_transfer(common, bh))
rc = -EIO;
common->next_buffhd_to_fill = bh->next;
if (common->can_stall)
rc = halt_bulk_in_endpoint(common->fsg);
}
break;
/*
* We have processed all we want from the data the host has sent.
* There may still be outstanding bulk-out requests.
*/
case DATA_DIR_FROM_HOST:
if (common->residue == 0) {
/* Nothing to receive */
/* Did the host stop sending unexpectedly early? */
} else if (common->short_packet_received) {
raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
rc = -EINTR;
/*
* We haven't processed all the incoming data. Even though
* we may be allowed to stall, doing so would cause a race.
* The controller may already have ACK'ed all the remaining
* bulk-out packets, in which case the host wouldn't see a
* STALL. Not realizing the endpoint was halted, it wouldn't
* clear the halt -- leading to problems later on.
*/
#if 0
} else if (common->can_stall) {
if (fsg_is_set(common))
fsg_set_halt(common->fsg,
common->fsg->bulk_out);
raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
rc = -EINTR;
#endif
/*
* We can't stall. Read in the excess data and throw it
* all away.
*/
} else {
rc = throw_away_data(common);
}
break;
}
return rc;
}
static int send_status(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
struct fsg_buffhd *bh;
struct bulk_cs_wrap *csw;
int rc;
u8 status = US_BULK_STAT_OK;
u32 sd, sdinfo = 0;
/* Wait for the next buffer to become available */
spin_lock_irq(&common->lock);
bh = common->next_buffhd_to_fill;
while (bh->state != BUF_STATE_EMPTY) {
spin_unlock_irq(&common->lock);
rc = sleep_thread(common, true);
if (rc)
return rc;
spin_lock_irq(&common->lock);
}
spin_unlock_irq(&common->lock);
if (curlun) {
sd = curlun->sense_data;
sdinfo = curlun->sense_data_info;
} else if (common->bad_lun_okay)
sd = SS_NO_SENSE;
else
sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
if (common->phase_error) {
DBG(common, "sending phase-error status\n");
status = US_BULK_STAT_PHASE;
sd = SS_INVALID_COMMAND;
} else if (sd != SS_NO_SENSE) {
DBG(common, "sending command-failure status\n");
status = US_BULK_STAT_FAIL;
VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
" info x%x\n",
SK(sd), ASC(sd), ASCQ(sd), sdinfo);
}
/* Store and send the Bulk-only CSW */
csw = (void *)bh->buf;
csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
csw->Tag = common->tag;
csw->Residue = cpu_to_le32(common->residue);
/*
	 * Since the CSW is sent early, before the data is written to the
	 * storage medium, report a residue of zero here on the assumption
	 * that the write will succeed.
*/
if (write_error_after_csw_sent || must_report_residue) {
write_error_after_csw_sent = 0;
must_report_residue = 0;
}
else
csw->Residue = 0;
csw->Status = status;
bh->inreq->length = US_BULK_CS_WRAP_LEN;
bh->inreq->zero = 0;
if (!start_in_transfer(common, bh))
/* Don't know what to do if common->fsg is NULL */
return -EIO;
common->next_buffhd_to_fill = bh->next;
return 0;
}
/*-------------------------------------------------------------------------*/
/*
* Check whether the command is properly formed and whether its data size
* and direction agree with the values we already have.
*/
static int check_command(struct fsg_common *common, int cmnd_size,
enum data_direction data_dir, unsigned int mask,
int needs_medium, const char *name)
{
int i;
unsigned int lun = common->cmnd[1] >> 5;
static const char dirletter[4] = {'u', 'o', 'i', 'n'};
char hdlen[20];
struct fsg_lun *curlun;
hdlen[0] = 0;
if (common->data_dir != DATA_DIR_UNKNOWN)
sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
common->data_size);
VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
name, cmnd_size, dirletter[(int) data_dir],
common->data_size_from_cmnd, common->cmnd_size, hdlen);
/*
* We can't reply at all until we know the correct data direction
* and size.
*/
if (common->data_size_from_cmnd == 0)
data_dir = DATA_DIR_NONE;
if (common->data_size < common->data_size_from_cmnd) {
/*
* Host data size < Device data size is a phase error.
* Carry out the command, but only transfer as much as
* we are allowed.
*/
common->data_size_from_cmnd = common->data_size;
common->phase_error = 1;
}
common->residue = common->data_size;
common->usb_amount_left = common->data_size;
/* Conflicting data directions is a phase error */
if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
common->phase_error = 1;
return -EINVAL;
}
/* Verify the length of the command itself */
if (cmnd_size != common->cmnd_size) {
/*
* Special case workaround: There are plenty of buggy SCSI
* implementations. Many have issues with cbw->Length
* field passing a wrong command size. For those cases we
* always try to work around the problem by using the length
* sent by the host side provided it is at least as large
* as the correct command length.
* Examples of such cases would be MS-Windows, which issues
* REQUEST SENSE with cbw->Length == 12 where it should
* be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
* REQUEST SENSE with cbw->Length == 10 where it should
* be 6 as well.
*/
if (cmnd_size <= common->cmnd_size) {
DBG(common, "%s is buggy! Expected length %d "
"but we got %d\n", name,
cmnd_size, common->cmnd_size);
cmnd_size = common->cmnd_size;
} else {
common->phase_error = 1;
return -EINVAL;
}
}
/* Check that the LUN values are consistent */
if (common->lun != lun)
DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n",
common->lun, lun);
/* Check the LUN */
curlun = common->curlun;
if (curlun) {
if (common->cmnd[0] != REQUEST_SENSE) {
curlun->sense_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
} else {
common->bad_lun_okay = 0;
/*
* INQUIRY and REQUEST SENSE commands are explicitly allowed
* to use unsupported LUNs; all others may not.
*/
if (common->cmnd[0] != INQUIRY &&
common->cmnd[0] != REQUEST_SENSE) {
DBG(common, "unsupported LUN %u\n", common->lun);
return -EINVAL;
}
}
/*
* If a unit attention condition exists, only INQUIRY and
* REQUEST SENSE commands are allowed; anything else must fail.
*/
if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
common->cmnd[0] != INQUIRY &&
common->cmnd[0] != REQUEST_SENSE) {
curlun->sense_data = curlun->unit_attention_data;
curlun->unit_attention_data = SS_NO_SENSE;
return -EINVAL;
}
/* Check that only command bytes listed in the mask are non-zero */
common->cmnd[1] &= 0x1f; /* Mask away the LUN */
for (i = 1; i < cmnd_size; ++i) {
if (common->cmnd[i] && !(mask & (1 << i))) {
if (curlun)
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
}
/*
 * If the medium isn't mounted and the command needs to access
 * it, return an error.
 */
if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
return -EINVAL;
}
return 0;
}
/* wrapper of check_command for data size in blocks handling */
static int check_command_size_in_blocks(struct fsg_common *common,
int cmnd_size, enum data_direction data_dir,
unsigned int mask, int needs_medium, const char *name)
{
if (common->curlun)
common->data_size_from_cmnd <<= common->curlun->blkbits;
return check_command(common, cmnd_size, data_dir,
mask, needs_medium, name);
}
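/*
 * Central SCSI dispatcher: wait for a free buffer head, derive the
 * expected data size from the CDB, validate the command with
 * check_command() and call the matching do_*() handler.  Unknown
 * opcodes fall through to "unknown_cmnd" and report SS_INVALID_COMMAND.
 */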
static int do_scsi_command(struct fsg_common *common)
{
struct fsg_buffhd *bh;
int rc;
int reply = -EINVAL;
int i;
static char unknown[16];
dump_cdb(common);
/* Wait for the next buffer to become available for data or status */
spin_lock_irq(&common->lock);
bh = common->next_buffhd_to_fill;
common->next_buffhd_to_drain = bh;
while (bh->state != BUF_STATE_EMPTY) {
spin_unlock_irq(&common->lock);
rc = sleep_thread(common, true);
if (rc)
return rc;
spin_lock_irq(&common->lock);
}
spin_unlock_irq(&common->lock);
common->phase_error = 0;
common->short_packet_received = 0;
down_read(&common->filesem); /* We're using the backing file */
switch (common->cmnd[0]) {
case INQUIRY:
common->data_size_from_cmnd = common->cmnd[4];
reply = check_command(common, 6, DATA_DIR_TO_HOST,
(1<<4), 0,
"INQUIRY");
if (reply == 0)
reply = do_inquiry(common, bh);
break;
case MODE_SELECT:
common->data_size_from_cmnd = common->cmnd[4];
reply = check_command(common, 6, DATA_DIR_FROM_HOST,
(1<<1) | (1<<4), 0,
"MODE SELECT(6)");
if (reply == 0)
reply = do_mode_select(common, bh);
break;
case MODE_SELECT_10:
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
reply = check_command(common, 10, DATA_DIR_FROM_HOST,
(1<<1) | (3<<7), 0,
"MODE SELECT(10)");
if (reply == 0)
reply = do_mode_select(common, bh);
break;
case MODE_SENSE:
common->data_size_from_cmnd = common->cmnd[4];
reply = check_command(common, 6, DATA_DIR_TO_HOST,
(1<<1) | (1<<2) | (1<<4), 0,
"MODE SENSE(6)");
if (reply == 0)
reply = do_mode_sense(common, bh);
break;
case MODE_SENSE_10:
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
reply = check_command(common, 10, DATA_DIR_TO_HOST,
(1<<1) | (1<<2) | (3<<7), 0,
"MODE SENSE(10)");
if (reply == 0)
reply = do_mode_sense(common, bh);
break;
case ALLOW_MEDIUM_REMOVAL:
common->data_size_from_cmnd = 0;
reply = check_command(common, 6, DATA_DIR_NONE,
(1<<4), 0,
"PREVENT-ALLOW MEDIUM REMOVAL");
if (reply == 0)
reply = do_prevent_allow(common);
break;
case READ_6:
i = common->cmnd[4];
common->data_size_from_cmnd = (i == 0) ? 256 : i;
reply = check_command_size_in_blocks(common, 6,
DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)");
if (reply == 0)
reply = do_read(common);
break;
case READ_10:
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
reply = check_command_size_in_blocks(common, 10,
DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)");
if (reply == 0)
reply = do_read(common);
break;
case READ_12:
common->data_size_from_cmnd =
get_unaligned_be32(&common->cmnd[6]);
reply = check_command_size_in_blocks(common, 12,
DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)");
if (reply == 0)
reply = do_read(common);
break;
case READ_CAPACITY:
common->data_size_from_cmnd = 8;
reply = check_command(common, 10, DATA_DIR_TO_HOST,
(0xf<<2) | (1<<8), 1,
"READ CAPACITY");
if (reply == 0)
reply = do_read_capacity(common, bh);
break;
case READ_HEADER:
if (!common->curlun || !common->curlun->cdrom)
goto unknown_cmnd;
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
reply = check_command(common, 10, DATA_DIR_TO_HOST,
(3<<7) | (0x1f<<1), 1,
"READ HEADER");
if (reply == 0)
reply = do_read_header(common, bh);
break;
case READ_TOC:
if (!common->curlun || !common->curlun->cdrom)
goto unknown_cmnd;
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
reply = check_command(common, 10, DATA_DIR_TO_HOST,
(7<<6) | (1<<1), 1,
"READ TOC");
if (reply == 0)
reply = do_read_toc(common, bh);
break;
case READ_FORMAT_CAPACITIES:
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
reply = check_command(common, 10, DATA_DIR_TO_HOST,
(3<<7), 1,
"READ FORMAT CAPACITIES");
if (reply == 0)
reply = do_read_format_capacities(common, bh);
break;
case REQUEST_SENSE:
common->data_size_from_cmnd = common->cmnd[4];
reply = check_command(common, 6, DATA_DIR_TO_HOST,
(1<<4), 0,
"REQUEST SENSE");
if (reply == 0)
reply = do_request_sense(common, bh);
break;
case START_STOP:
common->data_size_from_cmnd = 0;
reply = check_command(common, 6, DATA_DIR_NONE,
(1<<1) | (1<<4), 0,
"START-STOP UNIT");
if (reply == 0)
reply = do_start_stop(common);
break;
case SYNCHRONIZE_CACHE:
common->data_size_from_cmnd = 0;
reply = check_command(common, 10, DATA_DIR_NONE,
(0xf<<2) | (3<<7), 1,
"SYNCHRONIZE CACHE");
if (reply == 0)
reply = do_synchronize_cache(common);
break;
case TEST_UNIT_READY:
common->data_size_from_cmnd = 0;
reply = check_command(common, 6, DATA_DIR_NONE,
0, 1,
"TEST UNIT READY");
break;
/*
* Although optional, this command is used by MS-Windows. We
* support a minimal version: BytChk must be 0.
*/
case VERIFY:
common->data_size_from_cmnd = 0;
reply = check_command(common, 10, DATA_DIR_NONE,
(1<<1) | (0xf<<2) | (3<<7), 1,
"VERIFY");
if (reply == 0)
reply = do_verify(common);
break;
case WRITE_6:
i = common->cmnd[4];
common->data_size_from_cmnd = (i == 0) ? 256 : i;
reply = check_command_size_in_blocks(common, 6,
DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)");
if (reply == 0)
reply = do_write(common);
break;
case WRITE_10:
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
reply = check_command_size_in_blocks(common, 10,
DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)");
if (reply == 0)
reply = do_write(common);
break;
case WRITE_12:
common->data_size_from_cmnd =
get_unaligned_be32(&common->cmnd[6]);
reply = check_command_size_in_blocks(common, 12,
DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)");
if (reply == 0)
reply = do_write(common);
break;
/*
* Some mandatory commands that we recognize but don't implement.
* They don't mean much in this setting. It's left as an exercise
* for anyone interested to implement RESERVE and RELEASE in terms
* of Posix locks.
*/
case FORMAT_UNIT:
case RELEASE:
case RESERVE:
case SEND_DIAGNOSTIC:
/* Fall through */
default:
unknown_cmnd:
common->data_size_from_cmnd = 0;
sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
reply = check_command(common, common->cmnd_size,
DATA_DIR_UNKNOWN, ~0, 0, unknown);
if (reply == 0) {
common->curlun->sense_data = SS_INVALID_COMMAND;
reply = -EINVAL;
}
break;
}
up_read(&common->filesem);
if (reply == -EINTR || signal_pending(current))
return -EINTR;
/* Set up the single reply buffer for finish_reply() */
if (reply == -EINVAL)
reply = 0; /* Error reply length */
if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
reply = min((u32)reply, common->data_size_from_cmnd);
bh->inreq->length = reply;
bh->state = BUF_STATE_FULL;
common->residue -= reply;
} /* Otherwise it's already set */
return 0;
}
/*-------------------------------------------------------------------------*/
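/*
 * Validate a newly received Command Block Wrapper.  Per the Bulk-Only
 * Transport spec the CBW is a fixed-size packet (US_BULK_CB_WRAP_LEN)
 * carrying a signature, tag, transfer length, flags, LUN and the CDB.
 * An invalid CBW wedges the IN endpoint; a well-formed one is cached
 * in the fsg_common fields consumed by do_scsi_command().
 */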
static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
struct usb_request *req = bh->outreq;
struct bulk_cb_wrap *cbw = req->buf;
struct fsg_common *common = fsg->common;
/* Was this a real packet? Should it be ignored? */
if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
return -EINVAL;
/* Is the CBW valid? */
if (req->actual != US_BULK_CB_WRAP_LEN ||
cbw->Signature != cpu_to_le32(
US_BULK_CB_SIGN)) {
DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
req->actual,
le32_to_cpu(cbw->Signature));
/*
* The Bulk-only spec says we MUST stall the IN endpoint
* (6.6.1), so it's unavoidable. It also says we must
* retain this state until the next reset, but there's
* no way to tell the controller driver it should ignore
* Clear-Feature(HALT) requests.
*
* We aren't required to halt the OUT endpoint; instead
* we can simply accept and discard any data received
* until the next reset.
*/
wedge_bulk_in_endpoint(fsg);
set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
return -EINVAL;
}
/* Is the CBW meaningful? */
if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
"cmdlen %u\n",
cbw->Lun, cbw->Flags, cbw->Length);
/*
* We can do anything we want here, so let's stall the
* bulk pipes if we are allowed to.
*/
if (common->can_stall) {
fsg_set_halt(fsg, fsg->bulk_out);
halt_bulk_in_endpoint(fsg);
}
return -EINVAL;
}
/* Save the command for later */
common->cmnd_size = cbw->Length;
memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
if (cbw->Flags & US_BULK_FLAG_IN)
common->data_dir = DATA_DIR_TO_HOST;
else
common->data_dir = DATA_DIR_FROM_HOST;
common->data_size = le32_to_cpu(cbw->DataTransferLength);
if (common->data_size == 0)
common->data_dir = DATA_DIR_NONE;
common->lun = cbw->Lun;
if (common->lun < common->nluns)
common->curlun = common->luns[common->lun];
else
common->curlun = NULL;
common->tag = cbw->Tag;
return 0;
}
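/*
 * Queue a bulk-out request for the next CBW, sleep until it arrives
 * and hand it to received_cbw() for validation.  The same buffer head
 * is drained in software, so next_buffhd_to_fill is not advanced.
 */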
static int get_next_command(struct fsg_common *common)
{
struct fsg_buffhd *bh;
int rc = 0;
/* Wait for the next buffer to become available */
spin_lock_irq(&common->lock);
bh = common->next_buffhd_to_fill;
while (bh->state != BUF_STATE_EMPTY) {
spin_unlock_irq(&common->lock);
rc = sleep_thread(common, true);
if (rc)
return rc;
spin_lock_irq(&common->lock);
}
spin_unlock_irq(&common->lock);
/* Queue a request to read a Bulk-only CBW */
set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
if (!start_out_transfer(common, bh))
/* Don't know what to do if common->fsg is NULL */
return -EIO;
/*
* We will drain the buffer in software, which means we
* can reuse it for the next filling. No need to advance
* next_buffhd_to_fill.
*/
/* Wait for the CBW to arrive */
spin_lock_irq(&common->lock);
while (bh->state != BUF_STATE_FULL) {
spin_unlock_irq(&common->lock);
rc = sleep_thread(common, true);
if (rc)
return rc;
spin_lock_irq(&common->lock);
}
spin_unlock_irq(&common->lock);
smp_rmb();
rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
spin_lock_irq(&common->lock);
bh->state = BUF_STATE_EMPTY;
spin_unlock_irq(&common->lock);
return rc;
}
/*-------------------------------------------------------------------------*/
static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
struct usb_request **preq)
{
*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (*preq)
return 0;
ERROR(common, "can't allocate request for %s\n", ep->name);
return -ENOMEM;
}
/* Reset interface setting and re-init endpoint state (toggle etc). */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
{
struct fsg_dev *fsg;
int i, rc = 0;
if (common->running)
DBG(common, "reset interface\n");
reset:
/* Deallocate the requests */
if (common->fsg) {
fsg = common->fsg;
for (i = 0; i < common->fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &common->buffhds[i];
if (bh->inreq) {
usb_ep_free_request(fsg->bulk_in, bh->inreq);
bh->inreq = NULL;
}
if (bh->outreq) {
usb_ep_free_request(fsg->bulk_out, bh->outreq);
bh->outreq = NULL;
}
}
common->fsg = NULL;
wake_up(&common->fsg_wait);
}
common->running = 0;
if (!new_fsg || rc)
return rc;
common->fsg = new_fsg;
fsg = common->fsg;
/* Allocate the requests */
for (i = 0; i < common->fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &common->buffhds[i];
rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
if (rc)
goto reset;
rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
if (rc)
goto reset;
bh->inreq->buf = bh->outreq->buf = bh->buf;
bh->inreq->context = bh->outreq->context = bh;
bh->inreq->complete = bulk_in_complete;
bh->outreq->complete = bulk_out_complete;
}
common->running = 1;
for (i = 0; i < common->nluns; ++i)
if (common->luns[i])
common->luns[i]->unit_attention_data =
SS_RESET_OCCURRED;
return rc;
}
/****************************** ALT CONFIGS ******************************/
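/*
 * Set-interface handler: enable and configure both bulk endpoints for
 * the current speed, then defer the control status stage by raising
 * FSG_STATE_CONFIG_CHANGE and returning USB_GADGET_DELAYED_STATUS so
 * the worker thread completes the switch.
 */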
static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
struct fsg_common *common = fsg->common;
int rc;
/* Enable the endpoints */
rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
if (rc)
goto err_exit;
rc = usb_ep_enable(fsg->bulk_in);
if (rc)
goto err_exit;
fsg->bulk_in->driver_data = common;
fsg->bulk_in_enabled = 1;
rc = config_ep_by_speed(common->gadget, &(fsg->function),
fsg->bulk_out);
if (rc)
goto reset_bulk_int;
rc = usb_ep_enable(fsg->bulk_out);
if (rc)
goto reset_bulk_int;
fsg->bulk_out->driver_data = common;
fsg->bulk_out_enabled = 1;
common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
csw_sent = 0;
write_error_after_csw_sent = 0;
fsg->common->new_fsg = fsg;
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
return USB_GADGET_DELAYED_STATUS;
reset_bulk_int:
usb_ep_disable(fsg->bulk_in);
fsg->bulk_in_enabled = 0;
err_exit:
return rc;
}
static void fsg_disable(struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
/* Disable the endpoints */
if (fsg->bulk_in_enabled) {
usb_ep_disable(fsg->bulk_in);
fsg->bulk_in->driver_data = NULL;
fsg->bulk_in_enabled = 0;
}
if (fsg->bulk_out_enabled) {
usb_ep_disable(fsg->bulk_out);
fsg->bulk_out->driver_data = NULL;
fsg->bulk_out_enabled = 0;
}
fsg->common->new_fsg = NULL;
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
}
/*-------------------------------------------------------------------------*/
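/*
 * Exception handler run from the main thread: drain pending signals,
 * cancel outstanding transfers, reset the buffer and SCSI state, and
 * carry out whatever action the raised exception requires (abort,
 * reset, config change, exit, ...).
 */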
static void handle_exception(struct fsg_common *common)
{
siginfo_t info;
int i;
struct fsg_buffhd *bh;
enum fsg_state old_state;
struct fsg_lun *curlun;
unsigned int exception_req_tag;
unsigned long flags;
/*
* Clear the existing signals. Anything but SIGUSR1 is converted
* into a high-priority EXIT exception.
*/
for (;;) {
int sig =
dequeue_signal_lock(current, &current->blocked, &info);
if (!sig)
break;
if (sig != SIGUSR1) {
if (common->state < FSG_STATE_EXIT)
DBG(common, "Main thread exiting on signal\n");
WARN_ON(1);
pr_err("%s: signal(%d) received from PID(%d) UID(%d)\n",
__func__, sig, info.si_pid, info.si_uid);
raise_exception(common, FSG_STATE_EXIT);
}
}
/* Cancel all the pending transfers */
if (likely(common->fsg)) {
for (i = 0; i < common->fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
if (bh->inreq_busy)
usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
if (bh->outreq_busy)
usb_ep_dequeue(common->fsg->bulk_out,
bh->outreq);
}
/* Wait until everything is idle */
for (;;) {
int num_active = 0;
spin_lock_irq(&common->lock);
for (i = 0; i < common->fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
num_active += bh->inreq_busy + bh->outreq_busy;
}
spin_unlock_irq(&common->lock);
if (num_active == 0)
break;
if (sleep_thread(common, true))
return;
}
/* Clear out the controller's fifos */
if (common->fsg->bulk_in_enabled)
usb_ep_fifo_flush(common->fsg->bulk_in);
if (common->fsg->bulk_out_enabled)
usb_ep_fifo_flush(common->fsg->bulk_out);
}
/*
* Reset the I/O buffer states and pointers, the SCSI
* state, and the exception. Then invoke the handler.
*/
spin_lock_irqsave(&common->lock, flags);
for (i = 0; i < common->fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
bh->state = BUF_STATE_EMPTY;
}
common->next_buffhd_to_fill = &common->buffhds[0];
common->next_buffhd_to_drain = &common->buffhds[0];
exception_req_tag = common->exception_req_tag;
old_state = common->state;
if (old_state == FSG_STATE_ABORT_BULK_OUT)
common->state = FSG_STATE_STATUS_PHASE;
else {
for (i = 0; i < common->nluns; ++i) {
curlun = common->luns[i];
if (!curlun)
continue;
curlun->prevent_medium_removal = 0;
curlun->sense_data = SS_NO_SENSE;
curlun->unit_attention_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
common->state = FSG_STATE_IDLE;
}
spin_unlock_irqrestore(&common->lock, flags);
/* Carry out any extra actions required for the exception */
switch (old_state) {
case FSG_STATE_ABORT_BULK_OUT:
send_status(common);
spin_lock_irq(&common->lock);
if (common->state == FSG_STATE_STATUS_PHASE)
common->state = FSG_STATE_IDLE;
spin_unlock_irq(&common->lock);
break;
case FSG_STATE_RESET:
/*
* In case we were forced against our will to halt a
* bulk endpoint, clear the halt now. (The SuperH UDC
* requires this.)
*/
if (!fsg_is_set(common))
break;
if (test_and_clear_bit(IGNORE_BULK_OUT,
&common->fsg->atomic_bitflags))
usb_ep_clear_halt(common->fsg->bulk_in);
if (common->ep0_req_tag == exception_req_tag) {
/* Complete the status stage */
if (common->cdev)
usb_composite_setup_continue(common->cdev);
else
ep0_queue(common);
}
/*
* Technically this should go here, but it would only be
* a waste of time. Ditto for the INTERFACE_CHANGE and
* CONFIG_CHANGE cases.
*/
/* for (i = 0; i < common->nluns; ++i) */
/* if (common->luns[i]) */
/* common->luns[i]->unit_attention_data = */
/* SS_RESET_OCCURRED; */
break;
case FSG_STATE_CONFIG_CHANGE:
do_set_interface(common, common->new_fsg);
if (common->new_fsg)
usb_composite_setup_continue(common->cdev);
break;
case FSG_STATE_EXIT:
case FSG_STATE_TERMINATED:
do_set_interface(common, NULL); /* Free resources */
spin_lock_irq(&common->lock);
common->state = FSG_STATE_TERMINATED; /* Stop the thread */
spin_unlock_irq(&common->lock);
break;
case FSG_STATE_INTERFACE_CHANGE:
case FSG_STATE_DISCONNECT:
case FSG_STATE_COMMAND_PHASE:
case FSG_STATE_DATA_PHASE:
case FSG_STATE_STATUS_PHASE:
case FSG_STATE_IDLE:
break;
}
}
/*-------------------------------------------------------------------------*/
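/*
 * The gadget's worker thread.  Each loop iteration handles one SCSI
 * command: fetch a CBW, run the command, finish the data phase and
 * finally send the CSW, checking for exceptions between the phases.
 */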
static int fsg_main_thread(void *common_)
{
struct fsg_common *common = common_;
/*
* Allow the thread to be killed by a signal, but set the signal mask
* to block everything but INT, TERM, KILL, and USR1.
*/
allow_signal(SIGINT);
allow_signal(SIGTERM);
allow_signal(SIGKILL);
allow_signal(SIGUSR1);
/* Allow the thread to be frozen */
set_freezable();
/*
* Arrange for userspace references to be interpreted as kernel
* pointers. That way we can pass a kernel pointer to a routine
* that expects a __user pointer and it will work okay.
*/
set_fs(get_ds());
/* The main loop */
while (common->state != FSG_STATE_TERMINATED) {
if (exception_in_progress(common) || signal_pending(current)) {
handle_exception(common);
continue;
}
if (!common->running) {
sleep_thread(common, true);
continue;
}
if (get_next_command(common))
continue;
spin_lock_irq(&common->lock);
if (!exception_in_progress(common))
common->state = FSG_STATE_DATA_PHASE;
spin_unlock_irq(&common->lock);
if (do_scsi_command(common) || finish_reply(common))
continue;
spin_lock_irq(&common->lock);
if (!exception_in_progress(common))
common->state = FSG_STATE_STATUS_PHASE;
spin_unlock_irq(&common->lock);
/*
 * The CSW has already been sent for write commands, so skip
 * sending the status a second time.
 */
if (csw_sent) {
csw_sent = 0;
continue;
}
if (send_status(common))
continue;
spin_lock_irq(&common->lock);
if (!exception_in_progress(common))
common->state = FSG_STATE_IDLE;
spin_unlock_irq(&common->lock);
}
spin_lock_irq(&common->lock);
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
if (!common->ops || !common->ops->thread_exits
|| common->ops->thread_exits(common) < 0) {
struct fsg_lun **curlun_it = common->luns;
unsigned i = common->nluns;
down_write(&common->filesem);
for (; i--; ++curlun_it) {
struct fsg_lun *curlun = *curlun_it;
if (!curlun || !fsg_lun_is_open(curlun))
continue;
fsg_lun_close(curlun);
curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
}
up_write(&common->filesem);
}
/* Let fsg_unbind() know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
}
/*************************** DEVICE ATTRIBUTES ***************************/
static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
return fsg_show_ro(curlun, buf);
}
static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
return fsg_show_nofua(curlun, buf);
}
static ssize_t file_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
struct rw_semaphore *filesem = dev_get_drvdata(dev);
return fsg_show_file(curlun, filesem, buf);
}
static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
struct rw_semaphore *filesem = dev_get_drvdata(dev);
return fsg_store_ro(curlun, filesem, buf, count);
}
static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
return fsg_store_nofua(curlun, buf, count);
}
static ssize_t file_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
struct rw_semaphore *filesem = dev_get_drvdata(dev);
return fsg_store_file(curlun, filesem, buf, count);
}
static DEVICE_ATTR_RW(ro);
static DEVICE_ATTR_RW(nofua);
static DEVICE_ATTR_RW(file);
static DEVICE_ATTR(perf, 0644, fsg_show_perf, fsg_store_perf);
static struct device_attribute dev_attr_ro_cdrom = __ATTR_RO(ro);
static struct device_attribute dev_attr_file_nonremovable = __ATTR_RO(file);
/****************************** FSG COMMON ******************************/
static void fsg_common_release(struct kref *ref);
static void fsg_lun_release(struct device *dev)
{
/* Nothing needs to be done */
}
void fsg_common_get(struct fsg_common *common)
{
kref_get(&common->ref);
}
EXPORT_SYMBOL_GPL(fsg_common_get);
void fsg_common_put(struct fsg_common *common)
{
kref_put(&common->ref, fsg_common_release);
}
EXPORT_SYMBOL_GPL(fsg_common_put);
/* check if fsg_num_buffers is within a valid range */
static inline int fsg_num_buffers_validate(unsigned int fsg_num_buffers)
{
if (fsg_num_buffers >= 2 && fsg_num_buffers <= 4)
return 0;
pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
fsg_num_buffers, 2, 4);
return -EINVAL;
}
static struct fsg_common *fsg_common_setup(struct fsg_common *common)
{
if (!common) {
common = kzalloc(sizeof(*common), GFP_KERNEL);
if (!common)
return ERR_PTR(-ENOMEM);
common->free_storage_on_release = 1;
} else {
common->free_storage_on_release = 0;
}
init_rwsem(&common->filesem);
spin_lock_init(&common->lock);
kref_init(&common->ref);
init_completion(&common->thread_notifier);
init_waitqueue_head(&common->fsg_wait);
common->state = FSG_STATE_TERMINATED;
return common;
}
void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
{
common->sysfs = sysfs;
}
EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);
static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
{
if (buffhds) {
struct fsg_buffhd *bh = buffhds;
while (n--) {
kfree(bh->buf);
++bh;
}
kfree(buffhds);
}
}
int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
{
struct fsg_buffhd *bh, *buffhds;
int i, rc;
size_t extra_buf_alloc = 0;
if (common->gadget)
extra_buf_alloc = common->gadget->extra_buf_alloc;
rc = fsg_num_buffers_validate(n);
if (rc != 0)
return rc;
buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
if (!buffhds)
return -ENOMEM;
/* Data buffers cyclic list */
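/*
 * The goto below jumps into the middle of the loop so that the first
 * iteration allocates a buffer without first linking and advancing
 * from a previous element; the final bh->next assignment closes the
 * ring.
 */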
bh = buffhds;
i = n;
goto buffhds_first_it;
do {
bh->next = bh + 1;
++bh;
buffhds_first_it:
bh->buf = kmalloc(FSG_BUFLEN + extra_buf_alloc,
GFP_KERNEL);
if (unlikely(!bh->buf))
goto error_release;
} while (--i);
bh->next = buffhds;
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
common->fsg_num_buffers = n;
common->buffhds = buffhds;
return 0;
error_release:
/*
* "buf"s pointed to by heads after n - i are NULL
* so releasing them won't hurt
*/
_fsg_common_free_buffers(buffhds, n);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
static inline void fsg_common_remove_sysfs(struct fsg_lun *lun)
{
device_remove_file(&lun->dev, &dev_attr_nofua);
/*
* device_remove_file() =>
*
* here the attr (e.g. dev_attr_ro) is only used to be passed to:
*
* sysfs_remove_file() =>
*
* here e.g. both dev_attr_ro_cdrom and dev_attr_ro are in
* the same namespace and
* from here only attr->name is passed to:
*
* sysfs_hash_and_remove()
*
* attr->name is the same for dev_attr_ro_cdrom and
* dev_attr_ro
* attr->name is the same for dev_attr_file and
* dev_attr_file_nonremovable
*
* so we don't differentiate between removing e.g. dev_attr_ro_cdrom
* and dev_attr_ro
*/
device_remove_file(&lun->dev, &dev_attr_ro);
device_remove_file(&lun->dev, &dev_attr_file);
device_remove_file(&lun->dev, &dev_attr_perf);
}
void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs)
{
if (sysfs) {
fsg_common_remove_sysfs(lun);
device_unregister(&lun->dev);
}
fsg_lun_close(lun);
kfree(lun);
}
EXPORT_SYMBOL_GPL(fsg_common_remove_lun);
static void _fsg_common_remove_luns(struct fsg_common *common, int n)
{
int i;
for (i = 0; i < n; ++i)
if (common->luns[i]) {
fsg_common_remove_lun(common->luns[i], common->sysfs);
common->luns[i] = NULL;
}
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
void fsg_common_remove_luns(struct fsg_common *common)
{
_fsg_common_remove_luns(common, common->nluns);
}
void fsg_common_free_luns(struct fsg_common *common)
{
unsigned long flags;
fsg_common_remove_luns(common);
spin_lock_irqsave(&common->lock, flags);
kfree(common->luns);
common->luns = NULL;
common->nluns = 0;
spin_unlock_irqrestore(&common->lock, flags);
}
EXPORT_SYMBOL_GPL(fsg_common_free_luns);
int fsg_common_set_nluns(struct fsg_common *common, int nluns)
{
struct fsg_lun **curlun;
/* Find out how many LUNs there should be */
if (nluns < 1 || nluns > FSG_MAX_LUNS) {
pr_err("invalid number of LUNs: %u\n", nluns);
return -EINVAL;
}
curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
if (unlikely(!curlun))
return -ENOMEM;
if (common->luns)
fsg_common_free_luns(common);
common->luns = curlun;
common->nluns = nluns;
pr_info("Number of LUNs=%d\n", common->nluns);
return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
void fsg_common_set_ops(struct fsg_common *common,
const struct fsg_operations *ops)
{
common->ops = ops;
}
EXPORT_SYMBOL_GPL(fsg_common_set_ops);
void fsg_common_free_buffers(struct fsg_common *common)
{
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
common->buffhds = NULL;
}
EXPORT_SYMBOL_GPL(fsg_common_free_buffers);
int fsg_common_set_cdev(struct fsg_common *common,
struct usb_composite_dev *cdev, bool can_stall)
{
struct usb_string *us;
common->gadget = cdev->gadget;
common->ep0 = cdev->gadget->ep0;
common->ep0req = cdev->req;
common->cdev = cdev;
us = usb_gstrings_attach(cdev, fsg_strings_array,
ARRAY_SIZE(fsg_strings));
if (IS_ERR(us))
return PTR_ERR(us);
fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id;
/*
* Some peripheral controllers are known not to be able to
* halt bulk endpoints correctly. If one of them is present,
* disable stalls.
*/
common->can_stall = can_stall && !(gadget_is_at91(common->gadget));
return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_set_cdev);
static inline int fsg_common_add_sysfs(struct fsg_common *common,
struct fsg_lun *lun)
{
int rc;
rc = device_register(&lun->dev);
if (rc) {
put_device(&lun->dev);
return rc;
}
rc = device_create_file(&lun->dev,
lun->cdrom
? &dev_attr_ro_cdrom
: &dev_attr_ro);
if (rc)
goto error;
rc = device_create_file(&lun->dev,
lun->removable
? &dev_attr_file
: &dev_attr_file_nonremovable);
if (rc)
goto error;
rc = device_create_file(&lun->dev, &dev_attr_nofua);
if (rc)
goto error;
rc = device_create_file(&lun->dev, &dev_attr_perf);
if (rc)
pr_err("failed to create sysfs entry: %d\n", rc);
return 0;
error:
/* removing nonexistent files is a no-op */
fsg_common_remove_sysfs(lun);
device_unregister(&lun->dev);
return rc;
}
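/*
 * Create one logical unit from its configuration: allocate the
 * fsg_lun, register its sysfs device when sysfs is enabled, open the
 * backing file if one was given and publish it in common->luns[id].
 */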
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
unsigned int id, const char *name,
const char **name_pfx)
{
struct fsg_lun *lun;
char *pathbuf, *p;
int rc = -ENOMEM;
if (!common->nluns || !common->luns)
return -ENODEV;
if (common->luns[id])
return -EBUSY;
#ifdef CONFIG_ZTEMT_USB
if (!cfg->filename && !cfg->removable && !cfg->cdrom) {
#else
if (!cfg->filename && !cfg->removable) {
#endif
pr_err("no file given for LUN%d\n", id);
return -EINVAL;
}
lun = kzalloc(sizeof(*lun), GFP_KERNEL);
if (!lun)
return -ENOMEM;
lun->name_pfx = name_pfx;
lun->cdrom = !!cfg->cdrom;
lun->ro = cfg->cdrom || cfg->ro;
lun->initially_ro = lun->ro;
lun->removable = !!cfg->removable;
if (!common->sysfs) {
/* we DON'T own the name!*/
lun->name = name;
} else {
lun->dev.release = fsg_lun_release;
lun->dev.parent = &common->gadget->dev;
dev_set_drvdata(&lun->dev, &common->filesem);
dev_set_name(&lun->dev, "%s", name);
lun->name = dev_name(&lun->dev);
rc = fsg_common_add_sysfs(common, lun);
if (rc) {
pr_info("failed to register LUN%d: %d\n", id, rc);
goto error_sysfs;
}
}
common->luns[id] = lun;
if (cfg->filename) {
rc = fsg_lun_open(lun, cfg->filename);
if (rc)
goto error_lun;
}
pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
p = "(no medium)";
if (fsg_lun_is_open(lun)) {
p = "(error)";
if (pathbuf) {
p = d_path(&lun->filp->f_path, pathbuf, PATH_MAX);
if (IS_ERR(p))
p = "(error)";
}
}
pr_info("LUN: %s%s%sfile: %s\n",
lun->removable ? "removable " : "",
lun->ro ? "read only " : "",
lun->cdrom ? "CD-ROM " : "",
p);
kfree(pathbuf);
return 0;
error_lun:
if (common->sysfs) {
fsg_common_remove_sysfs(lun);
device_unregister(&lun->dev);
}
fsg_lun_close(lun);
common->luns[id] = NULL;
error_sysfs:
kfree(lun);
return rc;
}
EXPORT_SYMBOL_GPL(fsg_common_create_lun);
int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
{
char buf[8]; /* "lun" plus up to four decimal digits and a NUL */
int i, rc;
for (i = 0; i < common->nluns; ++i) {
snprintf(buf, sizeof(buf), "lun%d", i);
rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
if (rc)
goto fail;
}
pr_info("Number of LUNs=%d\n", common->nluns);
return 0;
fail:
_fsg_common_remove_luns(common, i);
return rc;
}
EXPORT_SYMBOL_GPL(fsg_common_create_luns);
void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
const char *pn)
{
int i;
/* Prepare inquiryString */
i = get_default_bcdDevice();
snprintf(common->inquiry_string, sizeof(common->inquiry_string),
"%-8s%-16s%04x", vn ?: "Linux",
/* Assume product name dependent on the first LUN */
pn ?: ((*common->luns)->cdrom
? "File-CD Gadget"
: "File-Stor Gadget"),
i);
}
EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
int fsg_common_run_thread(struct fsg_common *common)
{
common->state = FSG_STATE_IDLE;
/* Tell the thread to start working */
common->thread_task =
kthread_create(fsg_main_thread, common, "file-storage");
if (IS_ERR(common->thread_task)) {
common->state = FSG_STATE_TERMINATED;
return PTR_ERR(common->thread_task);
}
DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
wake_up_process(common->thread_task);
return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_run_thread);
static void fsg_common_release(struct kref *ref)
{
struct fsg_common *common = container_of(ref, struct fsg_common, ref);
/* If the thread isn't already dead, tell it to exit now */
if (common->state != FSG_STATE_TERMINATED) {
raise_exception(common, FSG_STATE_EXIT);
wait_for_completion(&common->thread_notifier);
}
if (likely(common->luns)) {
struct fsg_lun **lun_it = common->luns;
unsigned i = common->nluns;
/* In error recovery common->nluns may be zero. */
for (; i; --i, ++lun_it) {
struct fsg_lun *lun = *lun_it;
if (!lun)
continue;
if (common->sysfs)
fsg_common_remove_sysfs(lun);
fsg_lun_close(lun);
if (common->sysfs)
device_unregister(&lun->dev);
kfree(lun);
}
kfree(common->luns);
}
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
if (common->free_storage_on_release)
kfree(common);
}
int fsg_sysfs_update(struct fsg_common *common, struct device *dev, bool create)
{
int ret = 0, i;
pr_debug("%s(): common->nluns:%d\n", __func__, common->nluns);
if (create) {
for (i = 0; i < common->nluns; i++) {
if (i == 0)
snprintf(common->name[i], 8, "lun");
else
snprintf(common->name[i], 8, "lun%d", i-1);
ret = sysfs_create_link(&dev->kobj,
&common->luns[i]->dev.kobj,
common->name[i]);
if (ret) {
pr_err("%s(): failed creating sysfs:%d %s)\n",
__func__, i, common->name[i]);
goto remove_sysfs;
}
}
} else {
i = common->nluns;
goto remove_sysfs;
}
return 0;
remove_sysfs:
for (; i > 0; i--) {
pr_debug("%s(): delete sysfs for lun(id:%d)(name:%s)\n",
__func__, i, common->name[i-1]);
sysfs_remove_link(&dev->kobj, common->name[i-1]);
}
return ret;
}
EXPORT_SYMBOL(fsg_sysfs_update);
/*-------------------------------------------------------------------------*/
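/*
 * Bind the function to a configuration.  On the configfs path this
 * also attaches the common state to the composite device, sets the
 * inquiry string and starts the worker thread; in every case it grabs
 * an interface number, autoconfigures the bulk-in/bulk-out endpoints
 * and assigns the FS/HS/SS descriptors.
 */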
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
struct usb_gadget *gadget = c->cdev->gadget;
int i;
struct usb_ep *ep;
unsigned max_burst;
int ret;
struct fsg_opts *opts;
opts = fsg_opts_from_func_inst(f->fi);
if (!opts->no_configfs) {
ret = fsg_common_set_cdev(fsg->common, c->cdev,
fsg->common->can_stall);
if (ret)
return ret;
#ifdef CONFIG_ZTEMT_USB
fsg_common_set_inquiry_string(fsg->common, "nubia", "Android");
#else
fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
#endif
ret = fsg_common_run_thread(fsg->common);
if (ret)
return ret;
}
fsg->gadget = gadget;
/* New interface */
i = usb_interface_id(c, f);
if (i < 0)
return i;
fsg_intf_desc.bInterfaceNumber = i;
fsg->interface_number = i;
/* Find all the endpoints we will use */
ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
if (!ep)
goto autoconf_fail;
ep->driver_data = fsg->common; /* claim the endpoint */
fsg->bulk_in = ep;
ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
if (!ep)
goto autoconf_fail;
ep->driver_data = fsg->common; /* claim the endpoint */
fsg->bulk_out = ep;
/* Assume endpoint addresses are the same for both speeds */
fsg_hs_bulk_in_desc.bEndpointAddress =
fsg_fs_bulk_in_desc.bEndpointAddress;
fsg_hs_bulk_out_desc.bEndpointAddress =
fsg_fs_bulk_out_desc.bEndpointAddress;
/* Calculate bMaxBurst, we know packet size is 1024 */
max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
fsg_ss_bulk_in_desc.bEndpointAddress =
fsg_fs_bulk_in_desc.bEndpointAddress;
fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
fsg_ss_bulk_out_desc.bEndpointAddress =
fsg_fs_bulk_out_desc.bEndpointAddress;
fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
fsg_ss_function);
if (ret)
goto autoconf_fail;
return 0;
autoconf_fail:
ERROR(fsg, "unable to autoconfigure all endpoints\n");
return -ENOTSUPP;
}
/****************************** ALLOCATE FUNCTION *************************/
static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
struct fsg_common *common = fsg->common;
DBG(fsg, "unbind\n");
if (fsg->common->fsg == fsg) {
fsg->common->new_fsg = NULL;
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
/* FIXME: make interruptible or killable somehow? */
wait_event(common->fsg_wait, common->fsg != fsg);
}
usb_free_all_descriptors(&fsg->function);
}
static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct fsg_lun_opts, group);
}
static inline struct fsg_opts *to_fsg_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct fsg_opts,
func_inst.group);
}
CONFIGFS_ATTR_STRUCT(fsg_lun_opts);
CONFIGFS_ATTR_OPS(fsg_lun_opts);
static void fsg_lun_attr_release(struct config_item *item)
{
struct fsg_lun_opts *lun_opts;
lun_opts = to_fsg_lun_opts(item);
kfree(lun_opts);
}
static struct configfs_item_operations fsg_lun_item_ops = {
.release = fsg_lun_attr_release,
.show_attribute = fsg_lun_opts_attr_show,
.store_attribute = fsg_lun_opts_attr_store,
};
static ssize_t fsg_lun_opts_file_show(struct fsg_lun_opts *opts, char *page)
{
struct fsg_opts *fsg_opts;
fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
}
static ssize_t fsg_lun_opts_file_store(struct fsg_lun_opts *opts,
const char *page, size_t len)
{
struct fsg_opts *fsg_opts;
fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
}
static struct fsg_lun_opts_attribute fsg_lun_opts_file =
__CONFIGFS_ATTR(file, S_IRUGO | S_IWUSR, fsg_lun_opts_file_show,
fsg_lun_opts_file_store);
static ssize_t fsg_lun_opts_ro_show(struct fsg_lun_opts *opts, char *page)
{
return fsg_show_ro(opts->lun, page);
}
static ssize_t fsg_lun_opts_ro_store(struct fsg_lun_opts *opts,
const char *page, size_t len)
{
struct fsg_opts *fsg_opts;
fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
}
static struct fsg_lun_opts_attribute fsg_lun_opts_ro =
__CONFIGFS_ATTR(ro, S_IRUGO | S_IWUSR, fsg_lun_opts_ro_show,
fsg_lun_opts_ro_store);
static ssize_t fsg_lun_opts_removable_show(struct fsg_lun_opts *opts,
char *page)
{
return fsg_show_removable(opts->lun, page);
}
static ssize_t fsg_lun_opts_removable_store(struct fsg_lun_opts *opts,
const char *page, size_t len)
{
return fsg_store_removable(opts->lun, page, len);
}
static struct fsg_lun_opts_attribute fsg_lun_opts_removable =
__CONFIGFS_ATTR(removable, S_IRUGO | S_IWUSR,
fsg_lun_opts_removable_show,
fsg_lun_opts_removable_store);
static ssize_t fsg_lun_opts_cdrom_show(struct fsg_lun_opts *opts, char *page)
{
return fsg_show_cdrom(opts->lun, page);
}
static ssize_t fsg_lun_opts_cdrom_store(struct fsg_lun_opts *opts,
const char *page, size_t len)
{
struct fsg_opts *fsg_opts;
fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
len);
}
static struct fsg_lun_opts_attribute fsg_lun_opts_cdrom =
__CONFIGFS_ATTR(cdrom, S_IRUGO | S_IWUSR, fsg_lun_opts_cdrom_show,
fsg_lun_opts_cdrom_store);
static ssize_t fsg_lun_opts_nofua_show(struct fsg_lun_opts *opts, char *page)
{
return fsg_show_nofua(opts->lun, page);
}
static ssize_t fsg_lun_opts_nofua_store(struct fsg_lun_opts *opts,
const char *page, size_t len)
{
return fsg_store_nofua(opts->lun, page, len);
}
static struct fsg_lun_opts_attribute fsg_lun_opts_nofua =
__CONFIGFS_ATTR(nofua, S_IRUGO | S_IWUSR, fsg_lun_opts_nofua_show,
fsg_lun_opts_nofua_store);
static struct configfs_attribute *fsg_lun_attrs[] = {
&fsg_lun_opts_file.attr,
&fsg_lun_opts_ro.attr,
&fsg_lun_opts_removable.attr,
&fsg_lun_opts_cdrom.attr,
&fsg_lun_opts_nofua.attr,
NULL,
};
static struct config_item_type fsg_lun_type = {
.ct_item_ops = &fsg_lun_item_ops,
.ct_attrs = fsg_lun_attrs,
.ct_owner = THIS_MODULE,
};
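/*
 * configfs "mkdir lun.N" handler: parse the LUN number from the group
 * name, create the LUN with removable defaults and wrap it in a new
 * fsg_lun_opts group.  Fails while the function is in use or when the
 * slot is already occupied.
 */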
static struct config_group *fsg_lun_make(struct config_group *group,
const char *name)
{
struct fsg_lun_opts *opts;
struct fsg_opts *fsg_opts;
struct fsg_lun_config config;
char *num_str;
u8 num;
int ret;
num_str = strchr(name, '.');
if (!num_str) {
pr_err("Unable to locate . in LUN.NUMBER\n");
return ERR_PTR(-EINVAL);
}
num_str++;
ret = kstrtou8(num_str, 0, &num);
if (ret)
return ERR_PTR(ret);
fsg_opts = to_fsg_opts(&group->cg_item);
if (num >= FSG_MAX_LUNS)
return ERR_PTR(-ERANGE);
mutex_lock(&fsg_opts->lock);
if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
ret = -EBUSY;
goto out;
}
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts) {
ret = -ENOMEM;
goto out;
}
memset(&config, 0, sizeof(config));
config.removable = true;
ret = fsg_common_create_lun(fsg_opts->common, &config, num, name,
(const char **)&group->cg_item.ci_name);
if (ret) {
kfree(opts);
goto out;
}
opts->lun = fsg_opts->common->luns[num];
opts->lun_id = num;
mutex_unlock(&fsg_opts->lock);
config_group_init_type_name(&opts->group, name, &fsg_lun_type);
return &opts->group;
out:
mutex_unlock(&fsg_opts->lock);
return ERR_PTR(ret);
}
static void fsg_lun_drop(struct config_group *group, struct config_item *item)
{
struct fsg_lun_opts *lun_opts;
struct fsg_opts *fsg_opts;
lun_opts = to_fsg_lun_opts(item);
fsg_opts = to_fsg_opts(&group->cg_item);
mutex_lock(&fsg_opts->lock);
if (fsg_opts->refcnt) {
struct config_item *gadget;
gadget = group->cg_item.ci_parent->ci_parent;
unregister_gadget_item(gadget);
}
fsg_common_remove_lun(lun_opts->lun, fsg_opts->common->sysfs);
fsg_opts->common->luns[lun_opts->lun_id] = NULL;
lun_opts->lun_id = 0;
mutex_unlock(&fsg_opts->lock);
config_item_put(item);
}
CONFIGFS_ATTR_STRUCT(fsg_opts);
CONFIGFS_ATTR_OPS(fsg_opts);
static void fsg_attr_release(struct config_item *item)
{
struct fsg_opts *opts = to_fsg_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations fsg_item_ops = {
.release = fsg_attr_release,
.show_attribute = fsg_opts_attr_show,
.store_attribute = fsg_opts_attr_store,
};
static ssize_t fsg_opts_stall_show(struct fsg_opts *opts, char *page)
{
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%d", opts->common->can_stall);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t fsg_opts_stall_store(struct fsg_opts *opts, const char *page,
size_t len)
{
int ret;
bool stall;
mutex_lock(&opts->lock);
if (opts->refcnt) {
mutex_unlock(&opts->lock);
return -EBUSY;
}
ret = strtobool(page, &stall);
if (!ret) {
opts->common->can_stall = stall;
ret = len;
}
mutex_unlock(&opts->lock);
return ret;
}
static struct fsg_opts_attribute fsg_opts_stall =
__CONFIGFS_ATTR(stall, S_IRUGO | S_IWUSR, fsg_opts_stall_show,
fsg_opts_stall_store);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static ssize_t fsg_opts_num_buffers_show(struct fsg_opts *opts, char *page)
{
int result;
mutex_lock(&opts->lock);
result = sprintf(page, "%d", opts->common->fsg_num_buffers);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t fsg_opts_num_buffers_store(struct fsg_opts *opts,
const char *page, size_t len)
{
int ret;
u8 num;
mutex_lock(&opts->lock);
if (opts->refcnt) {
ret = -EBUSY;
goto end;
}
ret = kstrtou8(page, 0, &num);
if (ret)
goto end;
ret = fsg_num_buffers_validate(num);
if (ret)
goto end;
fsg_common_set_num_buffers(opts->common, num);
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
static struct fsg_opts_attribute fsg_opts_num_buffers =
__CONFIGFS_ATTR(num_buffers, S_IRUGO | S_IWUSR,
fsg_opts_num_buffers_show,
fsg_opts_num_buffers_store);
#endif
static struct configfs_attribute *fsg_attrs[] = {
&fsg_opts_stall.attr,
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
&fsg_opts_num_buffers.attr,
#endif
NULL,
};
static struct configfs_group_operations fsg_group_ops = {
.make_group = fsg_lun_make,
.drop_item = fsg_lun_drop,
};
static struct config_item_type fsg_func_type = {
.ct_item_ops = &fsg_item_ops,
.ct_group_ops = &fsg_group_ops,
.ct_attrs = fsg_attrs,
.ct_owner = THIS_MODULE,
};
static void fsg_free_inst(struct usb_function_instance *fi)
{
struct fsg_opts *opts;
opts = fsg_opts_from_func_inst(fi);
fsg_common_put(opts->common);
kfree(opts);
}
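/*
 * Allocate a function instance for configfs: set up the shared
 * fsg_common, reserve FSG_MAX_LUNS slots, allocate the data buffers
 * and pre-create the default "lun.0" group.
 */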
static struct usb_function_instance *fsg_alloc_inst(void)
{
struct fsg_opts *opts;
struct fsg_lun_config config;
int rc;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = fsg_free_inst;
opts->common = fsg_common_setup(opts->common);
if (IS_ERR(opts->common)) {
rc = PTR_ERR(opts->common);
goto release_opts;
}
rc = fsg_common_set_nluns(opts->common, FSG_MAX_LUNS);
if (rc)
goto release_opts;
rc = fsg_common_set_num_buffers(opts->common,
CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
if (rc)
goto release_luns;
pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
memset(&config, 0, sizeof(config));
config.removable = true;
rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
(const char **)&opts->func_inst.group.cg_item.ci_name);
opts->lun0.lun = opts->common->luns[0];
opts->lun0.lun_id = 0;
config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
opts->default_groups[0] = &opts->lun0.group;
opts->func_inst.group.default_groups = opts->default_groups;
config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);
return &opts->func_inst;
release_luns:
kfree(opts->common->luns);
release_opts:
kfree(opts);
return ERR_PTR(rc);
}
static void fsg_free(struct usb_function *f)
{
struct fsg_dev *fsg;
struct fsg_opts *opts;
fsg = container_of(f, struct fsg_dev, function);
opts = container_of(f->fi, struct fsg_opts, func_inst);
mutex_lock(&opts->lock);
opts->refcnt--;
mutex_unlock(&opts->lock);
kfree(fsg);
}
static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
{
struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
struct fsg_common *common = opts->common;
struct fsg_dev *fsg;
fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
if (unlikely(!fsg))
return ERR_PTR(-ENOMEM);
mutex_lock(&opts->lock);
opts->refcnt++;
mutex_unlock(&opts->lock);
fsg->function.name = FSG_DRIVER_DESC;
fsg->function.bind = fsg_bind;
fsg->function.unbind = fsg_unbind;
fsg->function.setup = fsg_setup;
fsg->function.set_alt = fsg_set_alt;
fsg->function.disable = fsg_disable;
fsg->function.free_func = fsg_free;
fsg->common = common;
setup_timer(&common->vfs_timer, msc_usb_vfs_timer_func,
(unsigned long) common);
return &fsg->function;
}
DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");
/************************* Module parameters *************************/
void fsg_config_from_params(struct fsg_config *cfg,
const struct fsg_module_parameters *params,
unsigned int fsg_num_buffers)
{
struct fsg_lun_config *lun;
unsigned i;
/* Configure LUNs */
cfg->nluns =
min(params->luns ?: (params->file_count ?: 1u),
(unsigned)FSG_MAX_LUNS);
for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
lun->ro = !!params->ro[i];
lun->cdrom = !!params->cdrom[i];
lun->removable = !!params->removable[i];
lun->filename =
params->file_count > i && params->file[i][0]
? params->file[i]
: NULL;
}
/* Let MSF use defaults */
cfg->vendor_name = NULL;
cfg->product_name = NULL;
cfg->ops = NULL;
cfg->private_data = NULL;
/* Finalise */
cfg->can_stall = params->stall;
cfg->fsg_num_buffers = fsg_num_buffers;
}
EXPORT_SYMBOL_GPL(fsg_config_from_params);
| xingrz/android_kernel_nubia_msm8996 | drivers/usb/gadget/function/f_mass_storage.c | C | gpl-2.0 | 106,868 |
/* Copyright (C) 2001 artofcode LLC. All rights reserved.
This software is provided AS-IS with no warranty, either express or
implied.
This software is distributed under license and may not be copied,
modified or distributed except as expressly authorized under the terms
of the license contained in the file LICENSE in this distribution.
For more information about licensing, please refer to
http://www.ghostscript.com/licensing/. For information on
commercial licensing, go to http://www.artifex.com/licensing/ or
contact Artifex Software, Inc., 101 Lucas Valley Road #110,
San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/
/* $Id: gp_stdia.c,v 1.5 2002/02/21 22:24:52 giles Exp $ */
/* Read stdin on platforms that support unbuffered read. */
/* We want unbuffered for console input and pipes. */
#include "stdio_.h"
#include "time_.h"
#include "unistd_.h"
#include "gx.h"
#include "gp.h"
/* Read bytes from stdin, unbuffered if possible. */
int gp_stdin_read(char *buf, int len, int interactive, FILE *f)
{
return read(fileno(f), buf, len);
}
| brho/plan9 | sys/src/cmd/gs/src/gp_stdia.c | C | gpl-2.0 | 1,087 |
/* Qualcomm Crypto Engine driver.
*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/qcedev.h>
#include <linux/bitops.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <mach/dma.h>
#include <mach/clk.h>
#include <mach/socinfo.h>
#include <mach/qcrypto.h>
#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"
#define CRYPTO_CONFIG_RESET 0xE001F
#define QCE_MAX_NUM_DSCR 0x500
#define QCE_SECTOR_SIZE 0x200
static DEFINE_MUTEX(bam_register_cnt);
struct bam_registration_info {
uint32_t handle;
uint32_t cnt;
};
static struct bam_registration_info bam_registry;
static bool ce_bam_registered;
/*
* CE HW device structure.
* Each engine has an instance of the structure.
* Each engine can only handle one crypto operation at one time. It is up to
* the sw above to ensure single threading of operation on an engine.
*/
struct qce_device {
struct device *pdev; /* Handle to platform_device structure */
unsigned char *coh_vmem; /* Allocated coherent virtual memory */
dma_addr_t coh_pmem; /* Allocated coherent physical memory */
int memsize; /* Memory allocated */
int is_shared; /* CE HW is shared */
bool support_cmd_dscr;
bool support_hw_key;
void __iomem *iobase; /* Virtual io base of CE HW */
unsigned int phy_iobase; /* Physical io base of CE HW */
struct clk *ce_core_src_clk; /* Handle to CE src clk*/
struct clk *ce_core_clk; /* Handle to CE clk */
struct clk *ce_clk; /* Handle to CE clk */
struct clk *ce_bus_clk; /* Handle to CE AXI clk*/
qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
int assoc_nents;
int ivsize;
int authsize;
int src_nents;
int dst_nents;
dma_addr_t phy_iv_in;
unsigned char dec_iv[16];
int dir;
void *areq;
enum qce_cipher_mode_enum mode;
struct qce_ce_cfg_reg_setting reg;
struct ce_sps_data ce_sps;
};
/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
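/*
 * Pack a byte stream into 32-bit words in big-endian (network) order,
 * as expected by the crypto engine's key/IV registers.  A trailing
 * partial word is left-justified and zero-padded.
 */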
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
unsigned int len)
{
unsigned n;
n = len / sizeof(uint32_t);
for (; n > 0; n--) {
*iv = ((*b << 24) & 0xff000000) |
(((*(b+1)) << 16) & 0xff0000) |
(((*(b+2)) << 8) & 0xff00) |
(*(b+3) & 0xff);
b += sizeof(uint32_t);
iv++;
}
n = len % sizeof(uint32_t);
if (n == 3) {
*iv = ((*b << 24) & 0xff000000) |
(((*(b+1)) << 16) & 0xff0000) |
(((*(b+2)) << 8) & 0xff00) ;
} else if (n == 2) {
*iv = ((*b << 24) & 0xff000000) |
(((*(b+1)) << 16) & 0xff0000) ;
} else if (n == 1) {
*iv = ((*b << 24) & 0xff000000) ;
}
}
static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
unsigned int len)
{
unsigned i, j;
unsigned char swap_iv[AES_IV_LENGTH];
memset(swap_iv, 0, AES_IV_LENGTH);
for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
swap_iv[i] = b[j];
_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}
static int count_sg(struct scatterlist *sg, int nbytes)
{
int i;
for (i = 0; nbytes > 0; i++, sg = scatterwalk_sg_next(sg))
nbytes -= sg->length;
return i;
}
static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
int i;
for (i = 0; i < nents; ++i) {
dma_map_sg(dev, sg, 1, direction);
sg = scatterwalk_sg_next(sg);
}
return nents;
}
static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
int i;
for (i = 0; i < nents; ++i) {
dma_unmap_sg(dev, sg, 1, direction);
sg = scatterwalk_sg_next(sg);
}
return nents;
}
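/*
 * Read the CE version register and verify this is a 5.x core; on
 * success record the minor revision and burst size and log the pipe
 * and BAM details.
 */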
static int _probe_ce_engine(struct qce_device *pce_dev)
{
unsigned int rev;
unsigned int maj_rev, min_rev, step_rev;
rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
mb();
maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
if (maj_rev != 0x05) {
pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
return -EIO;
}
pce_dev->ce_sps.minor_version = min_rev;
dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
pce_dev->ce_sps.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
dev_info(pce_dev->pdev,
"IO base, CE = 0x%x\n, "
"Consumer (IN) PIPE %d, "
"Producer (OUT) PIPE %d\n"
"IO base BAM = 0x%x\n"
"BAM IRQ %d\n",
(uint32_t) pce_dev->iobase,
pce_dev->ce_sps.dest_pipe_index,
pce_dev->ce_sps.src_pipe_index,
(uint32_t)pce_dev->ce_sps.bam_iobase,
pce_dev->ce_sps.bam_irq);
return 0;
}
static int _ce_get_hash_cmdlistinfo(struct qce_device *pce_dev,
struct qce_sha_req *sreq,
struct qce_cmdlist_info **cmdplistinfo)
{
struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;
switch (sreq->alg) {
case QCE_HASH_SHA1:
*cmdplistinfo = &cmdlistptr->auth_sha1;
break;
case QCE_HASH_SHA256:
*cmdplistinfo = &cmdlistptr->auth_sha256;
break;
case QCE_HASH_SHA1_HMAC:
*cmdplistinfo = &cmdlistptr->auth_sha1_hmac;
break;
case QCE_HASH_SHA256_HMAC:
*cmdplistinfo = &cmdlistptr->auth_sha256_hmac;
break;
case QCE_HASH_AES_CMAC:
if (sreq->authklen == AES128_KEY_SIZE)
*cmdplistinfo = &cmdlistptr->auth_aes_128_cmac;
else
*cmdplistinfo = &cmdlistptr->auth_aes_256_cmac;
break;
default:
break;
}
return 0;
}
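/*
 * Program a hash/HMAC/CMAC operation into the prebuilt command list:
 * load the MAC key (or select the HW key when the supplied key is all
 * zero), seed the digest with the standard IV on the first block or
 * with the running digest otherwise, then set the byte counts, the
 * first/last flags and the segment sizes.
 */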
static int _ce_setup_hash(struct qce_device *pce_dev,
struct qce_sha_req *sreq,
struct qce_cmdlist_info *cmdlistinfo)
{
uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
uint32_t diglen;
int i;
uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
bool sha1 = false;
struct sps_command_element *pce = NULL;
if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
(sreq->alg == QCE_HASH_SHA256_HMAC) ||
(sreq->alg == QCE_HASH_AES_CMAC)) {
uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
_byte_stream_to_net_words(mackey32, sreq->authkey,
sreq->authklen);
/* check for null key. If null, use hw key*/
for (i = 0; i < authk_size_in_word; i++) {
if (mackey32[i] != 0)
break;
}
pce = cmdlistinfo->go_proc;
if (i == authk_size_in_word) {
pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
pce_dev->phy_iobase);
} else {
pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
pce_dev->phy_iobase);
pce = cmdlistinfo->auth_key;
for (i = 0; i < authk_size_in_word; i++, pce++)
pce->data = mackey32[i];
}
}
if (sreq->alg == QCE_HASH_AES_CMAC)
goto go_proc;
/* if not the last, the size has to be on the block boundary */
if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
return -EIO;
switch (sreq->alg) {
case QCE_HASH_SHA1:
case QCE_HASH_SHA1_HMAC:
diglen = SHA1_DIGEST_SIZE;
sha1 = true;
break;
case QCE_HASH_SHA256:
case QCE_HASH_SHA256_HMAC:
diglen = SHA256_DIGEST_SIZE;
break;
default:
return -EINVAL;
}
/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
if (sreq->first_blk) {
if (sha1) {
for (i = 0; i < 5; i++)
auth32[i] = _std_init_vector_sha1[i];
} else {
for (i = 0; i < 8; i++)
auth32[i] = _std_init_vector_sha256[i];
}
} else {
_byte_stream_to_net_words(auth32, sreq->digest, diglen);
}
pce = cmdlistinfo->auth_iv;
for (i = 0; i < 5; i++, pce++)
pce->data = auth32[i];
if ((sreq->alg == QCE_HASH_SHA256) ||
(sreq->alg == QCE_HASH_SHA256_HMAC)) {
for (i = 5; i < 8; i++, pce++)
pce->data = auth32[i];
}
/* write auth_bytecnt 0/1, start with 0 */
pce = cmdlistinfo->auth_bytecount;
for (i = 0; i < 2; i++, pce++)
pce->data = sreq->auth_data[i];
/* Set/reset last bit in CFG register */
pce = cmdlistinfo->auth_seg_cfg;
if (sreq->last_blk)
pce->data |= 1 << CRYPTO_LAST;
else
pce->data &= ~(1 << CRYPTO_LAST);
if (sreq->first_blk)
pce->data |= 1 << CRYPTO_FIRST;
else
pce->data &= ~(1 << CRYPTO_FIRST);
go_proc:
/* write auth seg size */
pce = cmdlistinfo->auth_seg_size;
pce->data = sreq->size;
pce = cmdlistinfo->encr_seg_cfg;
pce->data = 0;
/* write auth seg size start*/
pce = cmdlistinfo->auth_seg_start;
pce->data = 0;
/* write seg size */
pce = cmdlistinfo->seg_size;
pce->data = sreq->size;
return 0;
}
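/*
 * Select the pre-built AEAD (cipher + HMAC-SHA1) command list that
 * matches the cipher algorithm, mode, and AES key length of the
 * request; returns NULL for unsupported combinations.
 */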
static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
struct qce_device *pce_dev, struct qce_req *creq)
{
switch (creq->alg) {
case CIPHER_ALG_DES:
switch (creq->mode) {
case QCE_MODE_ECB:
return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_ecb_des;
break;
case QCE_MODE_CBC:
return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_cbc_des;
break;
default:
return NULL;
}
break;
case CIPHER_ALG_3DES:
switch (creq->mode) {
case QCE_MODE_ECB:
return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_ecb_3des;
break;
case QCE_MODE_CBC:
return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_cbc_3des;
break;
default:
return NULL;
}
break;
case CIPHER_ALG_AES:
switch (creq->mode) {
case QCE_MODE_ECB:
if (creq->encklen == AES128_KEY_SIZE)
return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_ecb_aes_128;
else if (creq->encklen == AES256_KEY_SIZE)
return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_ecb_aes_256;
else
return NULL;
break;
case QCE_MODE_CBC:
if (creq->encklen == AES128_KEY_SIZE)
return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_cbc_aes_128;
else if (creq->encklen == AES256_KEY_SIZE)
return &pce_dev->ce_sps.
cmdlistptr.aead_hmac_sha1_cbc_aes_256;
else
return NULL;
break;
default:
return NULL;
}
break;
default:
return NULL;
}
return NULL;
}
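/*
 * Patch the selected AEAD command list with the request parameters:
 * counter/IV, cipher key, encode/decode direction, HMAC-SHA1 auth
 * key and initial vector, and the authentication, cipher, and total
 * segment sizes.
 */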
static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
uint32_t totallen_in, uint32_t coffset,
struct qce_cmdlist_info *cmdlistinfo)
{
int32_t authk_size_in_word = q_req->authklen/sizeof(uint32_t);
int i;
uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
struct sps_command_element *pce;
uint32_t a_cfg;
uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
uint32_t enck_size_in_word = 0;
uint32_t enciv_in_word;
uint32_t key_size;
uint32_t encr_cfg = 0;
uint32_t ivsize = q_req->ivsize;
key_size = q_req->encklen;
enck_size_in_word = key_size/sizeof(uint32_t);
switch (q_req->alg) {
case CIPHER_ALG_DES:
enciv_in_word = 2;
break;
case CIPHER_ALG_3DES:
enciv_in_word = 2;
break;
case CIPHER_ALG_AES:
if ((key_size != AES128_KEY_SIZE) &&
(key_size != AES256_KEY_SIZE))
return -EINVAL;
enciv_in_word = 4;
break;
default:
return -EINVAL;
}
switch (q_req->mode) {
case QCE_MODE_ECB:
case QCE_MODE_CBC:
case QCE_MODE_CTR:
pce_dev->mode = q_req->mode;
break;
default:
return -EINVAL;
}
if (q_req->mode != QCE_MODE_ECB) {
_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
pce = cmdlistinfo->encr_cntr_iv;
for (i = 0; i < enciv_in_word; i++, pce++)
pce->data = enciv32[i];
}
/*
* write encr key
* do not use hw key or pipe key
*/
_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
pce = cmdlistinfo->encr_key;
for (i = 0; i < enck_size_in_word; i++, pce++)
pce->data = enckey32[i];
/* write encr seg cfg */
pce = cmdlistinfo->encr_seg_cfg;
encr_cfg = pce->data;
if (q_req->dir == QCE_ENCRYPT)
encr_cfg |= (1 << CRYPTO_ENCODE);
else
encr_cfg &= ~(1 << CRYPTO_ENCODE);
pce->data = encr_cfg;
/* we only support sha1-hmac at this point */
_byte_stream_to_net_words(mackey32, q_req->authkey,
q_req->authklen);
pce = cmdlistinfo->auth_key;
for (i = 0; i < authk_size_in_word; i++, pce++)
pce->data = mackey32[i];
pce = cmdlistinfo->auth_iv;
for (i = 0; i < 5; i++, pce++)
pce->data = _std_init_vector_sha1[i];
/* write auth_bytecnt 0/1, start with 0 */
pce = cmdlistinfo->auth_bytecount;
for (i = 0; i < 2; i++, pce++)
pce->data = 0;
pce = cmdlistinfo->auth_seg_cfg;
a_cfg = pce->data;
a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
if (q_req->dir == QCE_ENCRYPT)
a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
else
a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
pce->data = a_cfg;
/* write auth seg size */
pce = cmdlistinfo->auth_seg_size;
pce->data = totallen_in;
/* write auth seg size start*/
pce = cmdlistinfo->auth_seg_start;
pce->data = 0;
/* write seg size */
pce = cmdlistinfo->seg_size;
pce->data = totallen_in;
/* write encr seg size */
pce = cmdlistinfo->encr_seg_size;
pce->data = q_req->cryptlen;
/* write encr seg start */
pce = cmdlistinfo->encr_seg_start;
pce->data = (coffset & 0xffff);
return 0;
};
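/*
 * Select the pre-built cipher command list for the request based on
 * algorithm (DES/3DES/AES), mode (ECB/CBC/CTR/XTS/CCM), and AES key
 * length.
 */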
static int _ce_get_cipher_cmdlistinfo(struct qce_device *pce_dev,
struct qce_req *creq,
struct qce_cmdlist_info **cmdlistinfo)
{
struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;
if (creq->alg != CIPHER_ALG_AES) {
switch (creq->alg) {
case CIPHER_ALG_DES:
if (creq->mode == QCE_MODE_ECB)
*cmdlistinfo = &cmdlistptr->cipher_des_ecb;
else
*cmdlistinfo = &cmdlistptr->cipher_des_cbc;
break;
case CIPHER_ALG_3DES:
if (creq->mode == QCE_MODE_ECB)
*cmdlistinfo =
&cmdlistptr->cipher_3des_ecb;
else
*cmdlistinfo =
&cmdlistptr->cipher_3des_cbc;
break;
default:
break;
}
} else {
switch (creq->mode) {
case QCE_MODE_ECB:
if (creq->encklen == AES128_KEY_SIZE)
*cmdlistinfo = &cmdlistptr->cipher_aes_128_ecb;
else
*cmdlistinfo = &cmdlistptr->cipher_aes_256_ecb;
break;
case QCE_MODE_CBC:
case QCE_MODE_CTR:
if (creq->encklen == AES128_KEY_SIZE)
*cmdlistinfo =
&cmdlistptr->cipher_aes_128_cbc_ctr;
else
*cmdlistinfo =
&cmdlistptr->cipher_aes_256_cbc_ctr;
break;
case QCE_MODE_XTS:
if (creq->encklen/2 == AES128_KEY_SIZE)
*cmdlistinfo = &cmdlistptr->cipher_aes_128_xts;
else
*cmdlistinfo = &cmdlistptr->cipher_aes_256_xts;
break;
case QCE_MODE_CCM:
if (creq->encklen == AES128_KEY_SIZE)
*cmdlistinfo = &cmdlistptr->aead_aes_128_ccm;
else
*cmdlistinfo = &cmdlistptr->aead_aes_256_ccm;
break;
default:
break;
}
}
return 0;
}
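/*
 * Patch the selected cipher command list with the request
 * parameters: GO register target (HW key or normal), cipher key or
 * pipe/HW key selection, CCM nonce and auth settings, XTS second key
 * and DU size, counter/IV values, encode/decode direction, and the
 * encryption and total segment sizes.
 */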
static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
uint32_t totallen_in, uint32_t coffset,
struct qce_cmdlist_info *cmdlistinfo)
{
uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
0, 0, 0, 0};
uint32_t enck_size_in_word = 0;
uint32_t key_size;
bool use_hw_key = false;
bool use_pipe_key = false;
uint32_t encr_cfg = 0;
uint32_t ivsize = creq->ivsize;
int i;
struct sps_command_element *pce = NULL;
if (creq->mode == QCE_MODE_XTS)
key_size = creq->encklen/2;
else
key_size = creq->encklen;
if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
use_hw_key = true;
} else {
if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
QCRYPTO_CTX_USE_PIPE_KEY)
use_pipe_key = true;
}
pce = cmdlistinfo->go_proc;
if (use_hw_key == true)
pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
pce_dev->phy_iobase);
else
pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
pce_dev->phy_iobase);
if ((use_pipe_key == false) && (use_hw_key == false)) {
_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
enck_size_in_word = key_size/sizeof(uint32_t);
}
if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
uint32_t auth_cfg = 0;
/* write nonce */
_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
pce = cmdlistinfo->auth_nonce_info;
for (i = 0; i < noncelen32; i++, pce++)
pce->data = nonce32[i];
if (creq->authklen == AES128_KEY_SIZE)
auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
else {
if (creq->authklen == AES256_KEY_SIZE)
auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
}
if (creq->dir == QCE_ENCRYPT)
auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
else
auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
if (use_hw_key == true) {
auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
} else {
auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
/* write auth key */
pce = cmdlistinfo->auth_key;
for (i = 0; i < authklen32; i++, pce++)
pce->data = enckey32[i];
}
pce = cmdlistinfo->auth_seg_cfg;
pce->data = auth_cfg;
pce = cmdlistinfo->auth_seg_size;
if (creq->dir == QCE_ENCRYPT)
pce->data = totallen_in;
else
pce->data = totallen_in - creq->authsize;
pce = cmdlistinfo->auth_seg_start;
pce->data = 0;
} else {
if (creq->op != QCE_REQ_AEAD) {
pce = cmdlistinfo->auth_seg_cfg;
pce->data = 0;
}
}
switch (creq->mode) {
case QCE_MODE_ECB:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
break;
case QCE_MODE_CBC:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
break;
case QCE_MODE_XTS:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
break;
case QCE_MODE_CCM:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
break;
case QCE_MODE_CTR:
default:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
break;
}
pce_dev->mode = creq->mode;
switch (creq->alg) {
case CIPHER_ALG_DES:
if (creq->mode != QCE_MODE_ECB) {
_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
pce = cmdlistinfo->encr_cntr_iv;
pce->data = enciv32[0];
pce++;
pce->data = enciv32[1];
}
if (use_hw_key == false) {
pce = cmdlistinfo->encr_key;
pce->data = enckey32[0];
pce++;
pce->data = enckey32[1];
}
break;
case CIPHER_ALG_3DES:
if (creq->mode != QCE_MODE_ECB) {
_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
pce = cmdlistinfo->encr_cntr_iv;
pce->data = enciv32[0];
pce++;
pce->data = enciv32[1];
}
if (use_hw_key == false) {
/* write encr key */
pce = cmdlistinfo->encr_key;
for (i = 0; i < 6; i++, pce++)
pce->data = enckey32[i];
}
break;
case CIPHER_ALG_AES:
default:
if (creq->mode == QCE_MODE_XTS) {
uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
= {0, 0, 0, 0, 0, 0, 0, 0};
uint32_t xtsklen =
creq->encklen/(2 * sizeof(uint32_t));
if ((use_hw_key == false) && (use_pipe_key == false)) {
_byte_stream_to_net_words(xtskey32,
(creq->enckey + creq->encklen/2),
creq->encklen/2);
/* write xts encr key */
pce = cmdlistinfo->encr_xts_key;
for (i = 0; i < xtsklen; i++, pce++)
pce->data = xtskey32[i];
}
/* write xts du size */
pce = cmdlistinfo->encr_xts_du_size;
if (!(creq->flags & QCRYPTO_CTX_XTS_MASK))
pce->data = creq->cryptlen;
else
pce->data = min((unsigned int)QCE_SECTOR_SIZE,
creq->cryptlen);
}
if (creq->mode != QCE_MODE_ECB) {
if (creq->mode == QCE_MODE_XTS)
_byte_stream_swap_to_net_words(enciv32,
creq->iv, ivsize);
else
_byte_stream_to_net_words(enciv32, creq->iv,
ivsize);
/* write encr cntr iv */
pce = cmdlistinfo->encr_cntr_iv;
for (i = 0; i < 4; i++, pce++)
pce->data = enciv32[i];
if (creq->mode == QCE_MODE_CCM) {
/* write cntr iv for ccm */
pce = cmdlistinfo->encr_ccm_cntr_iv;
for (i = 0; i < 4; i++, pce++)
pce->data = enciv32[i];
/* update cntr_iv[3] by one */
pce = cmdlistinfo->encr_cntr_iv;
pce += 3;
pce->data += 1;
}
}
if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
CRYPTO_ENCR_KEY_SZ);
} else {
if (use_hw_key == false) {
/* write encr key */
pce = cmdlistinfo->encr_key;
for (i = 0; i < enck_size_in_word; i++, pce++)
pce->data = enckey32[i];
}
} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
break;
} /* end of switch (creq->mode) */
if (use_pipe_key)
encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
<< CRYPTO_USE_PIPE_KEY_ENCR);
/* write encr seg cfg */
pce = cmdlistinfo->encr_seg_cfg;
if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
if (creq->dir == QCE_ENCRYPT)
pce->data |= (1 << CRYPTO_ENCODE);
else
pce->data &= ~(1 << CRYPTO_ENCODE);
encr_cfg = pce->data;
} else {
encr_cfg |=
((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
}
if (use_hw_key == true)
encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
else
encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
pce->data = encr_cfg;
/* write encr seg size */
pce = cmdlistinfo->encr_seg_size;
if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
pce->data = (creq->cryptlen + creq->authsize);
else
pce->data = creq->cryptlen;
/* write encr seg start */
pce = cmdlistinfo->encr_seg_start;
pce->data = (coffset & 0xffff);
/* write seg size */
pce = cmdlistinfo->seg_size;
pce->data = totallen_in;
return 0;
};
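/*
 * Register-programming variant of the hash setup (no command list):
 * the same hash parameters are written directly to the crypto engine
 * registers and the operation is started through the GO register.
 */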
static int _ce_setup_hash_direct(struct qce_device *pce_dev,
struct qce_sha_req *sreq)
{
uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
uint32_t diglen;
bool use_hw_key = false;
int i;
uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
bool sha1 = false;
uint32_t auth_cfg = 0;
/* clear status */
writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);
writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
CRYPTO_CONFIG_REG));
/*
	 * Ensure the previous write (setting the CONFIG register) has
	 * completed before starting to program the other configuration
	 * registers, so they are programmed with the endianness set in
	 * the CONFIG register.
*/
mb();
if (sreq->alg == QCE_HASH_AES_CMAC) {
		/* reset auth seg cfg */
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
		/* reset encr seg cfg */
		writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
		/* reset encr seg size */
		writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
/* Clear auth_ivn, auth_keyn registers */
for (i = 0; i < 16; i++) {
writel_relaxed(0, (pce_dev->iobase +
(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
writel_relaxed(0, (pce_dev->iobase +
(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
}
/* write auth_bytecnt 0/1/2/3, start with 0 */
for (i = 0; i < 4; i++)
writel_relaxed(0, pce_dev->iobase +
CRYPTO_AUTH_BYTECNT0_REG +
i * sizeof(uint32_t));
if (sreq->authklen == AES128_KEY_SIZE)
auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
else
auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
}
if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
(sreq->alg == QCE_HASH_SHA256_HMAC) ||
(sreq->alg == QCE_HASH_AES_CMAC)) {
uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
_byte_stream_to_net_words(mackey32, sreq->authkey,
sreq->authklen);
/* check for null key. If null, use hw key*/
for (i = 0; i < authk_size_in_word; i++) {
if (mackey32[i] != 0)
break;
}
if (i == authk_size_in_word)
use_hw_key = true;
else
			/* write the auth key into the auth_keyn registers */
for (i = 0; i < authk_size_in_word; i++)
writel_relaxed(mackey32[i], (pce_dev->iobase +
(CRYPTO_AUTH_KEY0_REG +
i*sizeof(uint32_t))));
}
if (sreq->alg == QCE_HASH_AES_CMAC)
goto go_proc;
/* if not the last, the size has to be on the block boundary */
if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
return -EIO;
switch (sreq->alg) {
case QCE_HASH_SHA1:
auth_cfg = pce_dev->reg.auth_cfg_sha1;
diglen = SHA1_DIGEST_SIZE;
sha1 = true;
break;
case QCE_HASH_SHA1_HMAC:
auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
diglen = SHA1_DIGEST_SIZE;
sha1 = true;
break;
case QCE_HASH_SHA256:
auth_cfg = pce_dev->reg.auth_cfg_sha256;
diglen = SHA256_DIGEST_SIZE;
break;
case QCE_HASH_SHA256_HMAC:
auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
diglen = SHA256_DIGEST_SIZE;
break;
default:
return -EINVAL;
}
/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
if (sreq->first_blk) {
if (sha1) {
for (i = 0; i < 5; i++)
auth32[i] = _std_init_vector_sha1[i];
} else {
for (i = 0; i < 8; i++)
auth32[i] = _std_init_vector_sha256[i];
}
} else {
_byte_stream_to_net_words(auth32, sreq->digest, diglen);
}
	/* write the initial vector/intermediate digest into auth_ivn registers */
for (i = 0; i < 5; i++)
writel_relaxed(auth32[i], (pce_dev->iobase +
(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
if ((sreq->alg == QCE_HASH_SHA256) ||
(sreq->alg == QCE_HASH_SHA256_HMAC)) {
for (i = 5; i < 8; i++)
writel_relaxed(auth32[i], (pce_dev->iobase +
(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
}
	/* write auth_bytecnt 0/1 */
for (i = 0; i < 2; i++)
writel_relaxed(sreq->auth_data[i], pce_dev->iobase +
CRYPTO_AUTH_BYTECNT0_REG +
i * sizeof(uint32_t));
/* Set/reset last bit in CFG register */
if (sreq->last_blk)
auth_cfg |= 1 << CRYPTO_LAST;
else
auth_cfg &= ~(1 << CRYPTO_LAST);
if (sreq->first_blk)
auth_cfg |= 1 << CRYPTO_FIRST;
else
auth_cfg &= ~(1 << CRYPTO_FIRST);
go_proc:
/* write seg_cfg */
writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
/* write auth seg_size */
writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
/* write auth_seg_start */
writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
/* reset encr seg_cfg */
writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
/* write seg_size */
writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
CRYPTO_CONFIG_REG));
/* issue go to crypto */
if (use_hw_key == false)
writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
pce_dev->iobase + CRYPTO_GOPROC_REG);
else
writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
/*
	 * Ensure the previous write (setting the GO register) has
	 * completed before issuing a DMA transfer request.
*/
mb();
return 0;
}
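/*
 * Register-programming variant of the AEAD setup: writes the cipher
 * key and IV, the HMAC-SHA1 key and initial vector, and the segment
 * configuration directly to the engine registers, then issues GO.
 */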
static int _ce_setup_aead_direct(struct qce_device *pce_dev,
struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
{
int32_t authk_size_in_word = q_req->authklen/sizeof(uint32_t);
int i;
uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
uint32_t a_cfg;
uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
uint32_t enck_size_in_word = 0;
uint32_t enciv_in_word;
uint32_t key_size;
uint32_t ivsize = q_req->ivsize;
uint32_t encr_cfg;
/* clear status */
writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);
writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
CRYPTO_CONFIG_REG));
/*
	 * Ensure the previous write (setting the CONFIG register) has
	 * completed before starting to program the other configuration
	 * registers, so they are programmed with the endianness set in
	 * the CONFIG register.
*/
mb();
key_size = q_req->encklen;
enck_size_in_word = key_size/sizeof(uint32_t);
switch (q_req->alg) {
case CIPHER_ALG_DES:
switch (q_req->mode) {
case QCE_MODE_ECB:
encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
break;
case QCE_MODE_CBC:
encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
break;
default:
return -EINVAL;
}
enciv_in_word = 2;
break;
case CIPHER_ALG_3DES:
switch (q_req->mode) {
case QCE_MODE_ECB:
encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
break;
case QCE_MODE_CBC:
encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
break;
default:
return -EINVAL;
}
enciv_in_word = 2;
break;
case CIPHER_ALG_AES:
switch (q_req->mode) {
case QCE_MODE_ECB:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
else if (key_size == AES256_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
else
return -EINVAL;
break;
case QCE_MODE_CBC:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
else if (key_size == AES256_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
else
return -EINVAL;
break;
default:
return -EINVAL;
}
enciv_in_word = 4;
break;
default:
return -EINVAL;
}
pce_dev->mode = q_req->mode;
/* write CNTR0_IV0_REG */
if (q_req->mode != QCE_MODE_ECB) {
_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
for (i = 0; i < enciv_in_word; i++)
writel_relaxed(enciv32[i], pce_dev->iobase +
(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
}
/*
* write encr key
* do not use hw key or pipe key
*/
_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
for (i = 0; i < enck_size_in_word; i++)
writel_relaxed(enckey32[i], pce_dev->iobase +
(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));
/* write encr seg cfg */
if (q_req->dir == QCE_ENCRYPT)
encr_cfg |= (1 << CRYPTO_ENCODE);
writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
/* we only support sha1-hmac at this point */
_byte_stream_to_net_words(mackey32, q_req->authkey,
q_req->authklen);
for (i = 0; i < authk_size_in_word; i++)
writel_relaxed(mackey32[i], pce_dev->iobase +
(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));
for (i = 0; i < 5; i++)
writel_relaxed(_std_init_vector_sha1[i], pce_dev->iobase +
(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
/* write auth_bytecnt 0/1, start with 0 */
writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
/* write encr seg size */
writel_relaxed(q_req->cryptlen, pce_dev->iobase +
CRYPTO_ENCR_SEG_SIZE_REG);
/* write encr start */
writel_relaxed(coffset & 0xffff, pce_dev->iobase +
CRYPTO_ENCR_SEG_START_REG);
a_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE) |
(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);
if (q_req->dir == QCE_ENCRYPT)
a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
else
a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
/* write auth seg_cfg */
writel_relaxed(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
/* write auth seg_size */
writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
/* write auth_seg_start */
writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
/* write seg_size */
writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
CRYPTO_CONFIG_REG));
/* issue go to crypto */
writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
pce_dev->iobase + CRYPTO_GOPROC_REG);
/*
	 * Ensure the previous write (setting the GO register) has
	 * completed before issuing a DMA transfer request.
*/
mb();
return 0;
};
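/*
 * Register-programming variant of the cipher setup: writes the CCM
 * nonce and auth key, the cipher key (or selects the HW/pipe key),
 * the counter/IV, and the segment configuration directly to the
 * engine registers, then issues GO.
 */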
static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
{
uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
0, 0, 0, 0};
uint32_t enck_size_in_word = 0;
uint32_t key_size;
bool use_hw_key = false;
bool use_pipe_key = false;
uint32_t encr_cfg = 0;
uint32_t ivsize = creq->ivsize;
int i;
/* clear status */
writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);
writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
CRYPTO_CONFIG_REG));
/*
	 * Ensure the previous write (setting the CONFIG register) has
	 * completed before starting to program the other configuration
	 * registers, so they are programmed with the endianness set in
	 * the CONFIG register.
*/
mb();
if (creq->mode == QCE_MODE_XTS)
key_size = creq->encklen/2;
else
key_size = creq->encklen;
if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
use_hw_key = true;
} else {
if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
QCRYPTO_CTX_USE_PIPE_KEY)
use_pipe_key = true;
}
if ((use_pipe_key == false) && (use_hw_key == false)) {
_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
enck_size_in_word = key_size/sizeof(uint32_t);
}
if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
uint32_t auth_cfg = 0;
/* Clear auth_ivn, auth_keyn registers */
for (i = 0; i < 16; i++) {
writel_relaxed(0, (pce_dev->iobase +
(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
writel_relaxed(0, (pce_dev->iobase +
(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
}
/* write auth_bytecnt 0/1/2/3, start with 0 */
for (i = 0; i < 4; i++)
writel_relaxed(0, pce_dev->iobase +
CRYPTO_AUTH_BYTECNT0_REG +
i * sizeof(uint32_t));
/* write nonce */
_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
for (i = 0; i < noncelen32; i++)
writel_relaxed(nonce32[i], pce_dev->iobase +
CRYPTO_AUTH_INFO_NONCE0_REG +
(i*sizeof(uint32_t)));
if (creq->authklen == AES128_KEY_SIZE)
auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
else {
if (creq->authklen == AES256_KEY_SIZE)
auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
}
if (creq->dir == QCE_ENCRYPT)
auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
else
auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
if (use_hw_key == true) {
auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
} else {
auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
/* write auth key */
for (i = 0; i < authklen32; i++)
writel_relaxed(enckey32[i], pce_dev->iobase +
CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
}
writel_relaxed(auth_cfg, pce_dev->iobase +
CRYPTO_AUTH_SEG_CFG_REG);
if (creq->dir == QCE_ENCRYPT)
writel_relaxed(totallen_in, pce_dev->iobase +
CRYPTO_AUTH_SEG_SIZE_REG);
else
writel_relaxed((totallen_in - creq->authsize),
pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
} else {
if (creq->op != QCE_REQ_AEAD)
writel_relaxed(0, pce_dev->iobase +
CRYPTO_AUTH_SEG_CFG_REG);
}
/*
	 * Ensure the previous writes (to all the AUTH registers) have
	 * completed before accessing a register that is not in the
	 * same 1K range.
*/
mb();
switch (creq->mode) {
case QCE_MODE_ECB:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
break;
case QCE_MODE_CBC:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
break;
case QCE_MODE_XTS:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
break;
case QCE_MODE_CCM:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
break;
case QCE_MODE_CTR:
default:
if (key_size == AES128_KEY_SIZE)
encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
else
encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
break;
}
pce_dev->mode = creq->mode;
switch (creq->alg) {
case CIPHER_ALG_DES:
if (creq->mode != QCE_MODE_ECB) {
encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
writel_relaxed(enciv32[0], pce_dev->iobase +
CRYPTO_CNTR0_IV0_REG);
writel_relaxed(enciv32[1], pce_dev->iobase +
CRYPTO_CNTR1_IV1_REG);
} else {
encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
}
if (use_hw_key == false) {
writel_relaxed(enckey32[0], pce_dev->iobase +
CRYPTO_ENCR_KEY0_REG);
writel_relaxed(enckey32[1], pce_dev->iobase +
CRYPTO_ENCR_KEY1_REG);
}
break;
case CIPHER_ALG_3DES:
if (creq->mode != QCE_MODE_ECB) {
_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
writel_relaxed(enciv32[0], pce_dev->iobase +
CRYPTO_CNTR0_IV0_REG);
writel_relaxed(enciv32[1], pce_dev->iobase +
CRYPTO_CNTR1_IV1_REG);
encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
} else {
encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
}
if (use_hw_key == false) {
/* write encr key */
			for (i = 0; i < 6; i++)
				writel_relaxed(enckey32[i], (pce_dev->iobase +
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
}
break;
case CIPHER_ALG_AES:
default:
if (creq->mode == QCE_MODE_XTS) {
uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
= {0, 0, 0, 0, 0, 0, 0, 0};
uint32_t xtsklen =
creq->encklen/(2 * sizeof(uint32_t));
if ((use_hw_key == false) && (use_pipe_key == false)) {
_byte_stream_to_net_words(xtskey32,
(creq->enckey + creq->encklen/2),
creq->encklen/2);
/* write xts encr key */
for (i = 0; i < xtsklen; i++)
writel_relaxed(xtskey32[i],
pce_dev->iobase +
CRYPTO_ENCR_XTS_KEY0_REG +
(i * sizeof(uint32_t)));
}
/* write xts du size */
if (use_pipe_key == true)
writel_relaxed(min((uint32_t)QCE_SECTOR_SIZE,
creq->cryptlen),
pce_dev->iobase +
CRYPTO_ENCR_XTS_DU_SIZE_REG);
else
				writel_relaxed(creq->cryptlen,
pce_dev->iobase +
CRYPTO_ENCR_XTS_DU_SIZE_REG);
}
if (creq->mode != QCE_MODE_ECB) {
if (creq->mode == QCE_MODE_XTS)
_byte_stream_swap_to_net_words(enciv32,
creq->iv, ivsize);
else
_byte_stream_to_net_words(enciv32, creq->iv,
ivsize);
/* write encr cntr iv */
for (i = 0; i <= 3; i++)
writel_relaxed(enciv32[i], pce_dev->iobase +
CRYPTO_CNTR0_IV0_REG +
(i * sizeof(uint32_t)));
if (creq->mode == QCE_MODE_CCM) {
/* write cntr iv for ccm */
for (i = 0; i <= 3; i++)
writel_relaxed(enciv32[i],
pce_dev->iobase +
CRYPTO_ENCR_CCM_INT_CNTR0_REG +
(i * sizeof(uint32_t)));
/* update cntr_iv[3] by one */
writel_relaxed((enciv32[3] + 1),
pce_dev->iobase +
CRYPTO_CNTR0_IV0_REG +
(3 * sizeof(uint32_t)));
}
}
if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
CRYPTO_ENCR_KEY_SZ);
} else {
if ((use_hw_key == false) && (use_pipe_key == false)) {
for (i = 0; i < enck_size_in_word; i++)
writel_relaxed(enckey32[i],
pce_dev->iobase +
CRYPTO_ENCR_KEY0_REG +
(i * sizeof(uint32_t)));
}
} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
break;
} /* end of switch (creq->mode) */
if (use_pipe_key)
encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
<< CRYPTO_USE_PIPE_KEY_ENCR);
/* write encr seg cfg */
encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
if (use_hw_key == true)
encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
else
encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
/* write encr seg cfg */
writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
/* write encr seg size */
if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
writel_relaxed((creq->cryptlen + creq->authsize),
pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
else
writel_relaxed(creq->cryptlen,
pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
/* write encr seg start */
writel_relaxed((coffset & 0xffff),
pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
	/* write the counter mask */
writel_relaxed(0xffffffff,
pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
/* write seg size */
writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
writel_relaxed(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
CRYPTO_CONFIG_REG));
/* issue go to crypto */
if (use_hw_key == false)
writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
pce_dev->iobase + CRYPTO_GOPROC_REG);
else
writel_relaxed(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
/*
	 * Ensure the previous write (setting the GO register) has
	 * completed before issuing a DMA transfer request.
*/
mb();
return 0;
};
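/*
 * If command descriptors are not supported this is a no-op;
 * otherwise queue the unlock_all_pipes command list on the consumer
 * pipe with the UNLOCK flag so that other pipe groups can use the
 * crypto engine again.
 */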
static int _qce_unlock_other_pipes(struct qce_device *pce_dev)
{
int rc = 0;
if (pce_dev->support_cmd_dscr == false)
return rc;
pce_dev->ce_sps.consumer.event.callback = NULL;
rc = sps_transfer_one(pce_dev->ce_sps.consumer.pipe,
GET_PHYS_ADDR(pce_dev->ce_sps.cmdlistptr.unlock_all_pipes.cmdlist),
0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
if (rc) {
pr_err("sps_xfr_one() fail rc=%d", rc);
rc = -EINVAL;
}
return rc;
}
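/*
 * Completion handler for AEAD requests: unmap the source,
 * destination, and associated data, pick up the MAC from the result
 * dump, check the engine and SPS status for errors (including CCM
 * MAC failure), and invoke the client callback with the MAC and,
 * for non-CCM modes, the returned counter/IV.
 */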
static int _aead_complete(struct qce_device *pce_dev)
{
struct aead_request *areq;
unsigned char mac[SHA256_DIGEST_SIZE];
uint32_t status;
int32_t result_status;
areq = (struct aead_request *) pce_dev->areq;
if (areq->src != areq->dst) {
qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
DMA_FROM_DEVICE);
}
qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
DMA_TO_DEVICE);
/* check MAC */
memcpy(mac, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
SHA256_DIGEST_SIZE);
/* read status before unlock */
status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
if (_qce_unlock_other_pipes(pce_dev))
return -EINVAL;
/*
	 * Don't use the result dump status; the operation may not be
	 * complete. Use the status just read from the device instead.
	 * If the result_status from the result dump is ever needed, it
	 * has to be byte swapped, since the device is set to little
	 * endian.
*/
result_status = 0;
pce_dev->ce_sps.result->status = 0;
if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
| (1 << CRYPTO_HSD_ERR))) {
pr_err("aead operation error. Status %x\n", status);
result_status = -ENXIO;
} else if (pce_dev->ce_sps.consumer_status |
pce_dev->ce_sps.producer_status) {
pr_err("aead sps operation error. sps status %x %x\n",
pce_dev->ce_sps.consumer_status,
pce_dev->ce_sps.producer_status);
result_status = -ENXIO;
} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
pr_err("aead operation not done? Status %x, sps status %x %x\n",
status,
pce_dev->ce_sps.consumer_status,
pce_dev->ce_sps.producer_status);
result_status = -ENXIO;
}
if (pce_dev->mode == QCE_MODE_CCM) {
if (result_status == 0 && (status & (1 << CRYPTO_MAC_FAILED)))
result_status = -EBADMSG;
pce_dev->qce_cb(areq, mac, NULL, result_status);
} else {
uint32_t ivsize = 0;
struct crypto_aead *aead;
unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
aead = crypto_aead_reqtfm(areq);
ivsize = crypto_aead_ivsize(aead);
if (pce_dev->ce_sps.minor_version != 0)
dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
ivsize, DMA_TO_DEVICE);
memcpy(iv, (char *)(pce_dev->ce_sps.result->encr_cntr_iv),
sizeof(iv));
pce_dev->qce_cb(areq, mac, iv, result_status);
}
return 0;
};
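/*
 * Completion handler for hash requests: unmap the source, copy the
 * digest and byte counts out of the result dump, check the engine
 * and SPS status for errors, and invoke the client callback.
 */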
static int _sha_complete(struct qce_device *pce_dev)
{
struct ahash_request *areq;
unsigned char digest[SHA256_DIGEST_SIZE];
uint32_t bytecount32[2];
int32_t result_status = pce_dev->ce_sps.result->status;
uint32_t status;
areq = (struct ahash_request *) pce_dev->areq;
qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
DMA_TO_DEVICE);
memcpy(digest, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
SHA256_DIGEST_SIZE);
_byte_stream_to_net_words(bytecount32,
(unsigned char *)pce_dev->ce_sps.result->auth_byte_count,
2 * CRYPTO_REG_SIZE);
/* read status before unlock */
status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
if (_qce_unlock_other_pipes(pce_dev))
return -EINVAL;
/*
	 * Don't use the result dump status; the operation may not be
	 * complete. Use the status just read from the device instead.
	 * If the result_status from the result dump is ever needed, it
	 * has to be byte swapped, since the device is set to little
	 * endian.
*/
if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
| (1 << CRYPTO_HSD_ERR))) {
pr_err("sha operation error. Status %x\n", status);
result_status = -ENXIO;
} else if (pce_dev->ce_sps.consumer_status) {
pr_err("sha sps operation error. sps status %x\n",
pce_dev->ce_sps.consumer_status);
result_status = -ENXIO;
} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
pr_err("sha operation not done? Status %x, sps status %x\n",
status, pce_dev->ce_sps.consumer_status);
result_status = -ENXIO;
} else {
result_status = 0;
}
pce_dev->qce_cb(areq, digest, (char *)bytecount32,
result_status);
return 0;
};
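/*
 * Completion handler for ablkcipher requests: unmap the buffers,
 * check the engine and SPS status for errors, reconstruct the
 * returned IV/counter (derived in software on minor version 0
 * hardware), and invoke the client callback.
 */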
static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
struct ablkcipher_request *areq;
unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
uint32_t status;
int32_t result_status;
areq = (struct ablkcipher_request *) pce_dev->areq;
if (areq->src != areq->dst) {
qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
pce_dev->dst_nents, DMA_FROM_DEVICE);
}
qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
/* read status before unlock */
status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
if (_qce_unlock_other_pipes(pce_dev))
return -EINVAL;
/*
	 * Don't use the result dump status; the operation may not be
	 * complete. Use the status just read from the device instead.
	 * If the result_status from the result dump is ever needed, it
	 * has to be byte swapped, since the device is set to little
	 * endian.
*/
if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
| (1 << CRYPTO_HSD_ERR))) {
pr_err("ablk_cipher operation error. Status %x\n",
status);
result_status = -ENXIO;
} else if (pce_dev->ce_sps.consumer_status |
pce_dev->ce_sps.producer_status) {
pr_err("ablk_cipher sps operation error. sps status %x %x\n",
pce_dev->ce_sps.consumer_status,
pce_dev->ce_sps.producer_status);
result_status = -ENXIO;
} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
pr_err("ablk_cipher operation not done? Status %x, sps status %x %x\n",
status,
pce_dev->ce_sps.consumer_status,
pce_dev->ce_sps.producer_status);
result_status = -ENXIO;
} else {
result_status = 0;
}
if (pce_dev->mode == QCE_MODE_ECB) {
pce_dev->qce_cb(areq, NULL, NULL,
pce_dev->ce_sps.consumer_status |
result_status);
} else {
if (pce_dev->ce_sps.minor_version == 0) {
if (pce_dev->mode == QCE_MODE_CBC) {
if (pce_dev->dir == QCE_DECRYPT)
memcpy(iv, (char *)pce_dev->dec_iv,
sizeof(iv));
else
memcpy(iv, (unsigned char *)
(sg_virt(areq->src) +
areq->src->length - 16),
sizeof(iv));
}
if ((pce_dev->mode == QCE_MODE_CTR) ||
(pce_dev->mode == QCE_MODE_XTS)) {
uint32_t num_blk = 0;
uint32_t cntr_iv3 = 0;
unsigned long long cntr_iv64 = 0;
unsigned char *b = (unsigned char *)(&cntr_iv3);
memcpy(iv, areq->info, sizeof(iv));
if (pce_dev->mode != QCE_MODE_XTS)
num_blk = areq->nbytes/16;
else
num_blk = 1;
cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
(((*(iv + 13)) << 16) & 0xff0000) |
(((*(iv + 14)) << 8) & 0xff00) |
(*(iv + 15) & 0xff);
cntr_iv64 =
(((unsigned long long)cntr_iv3 &
(unsigned long long)0xFFFFFFFFULL) +
(unsigned long long)num_blk) %
(unsigned long long)(0x100000000ULL);
cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
*(iv + 15) = (char)(*b);
*(iv + 14) = (char)(*(b + 1));
*(iv + 13) = (char)(*(b + 2));
*(iv + 12) = (char)(*(b + 3));
}
} else {
memcpy(iv,
(char *)(pce_dev->ce_sps.result->encr_cntr_iv),
sizeof(iv));
}
pce_dev->qce_cb(areq, NULL, iv, result_status);
}
return 0;
};
#ifdef QCE_DEBUG
static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
{
int i, j, ents;
struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec;
uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
printk(KERN_INFO "==============================================\n");
printk(KERN_INFO "CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
printk(KERN_INFO "==============================================\n");
for (i = 0; i < pce_dev->ce_sps.in_transfer.iovec_count; i++) {
printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
iovec->addr, iovec->size, iovec->flags);
if (iovec->flags & cmd_flags) {
struct sps_command_element *pced;
pced = (struct sps_command_element *)
(GET_VIRT_ADDR(iovec->addr));
ents = iovec->size/(sizeof(struct sps_command_element));
for (j = 0; j < ents; j++) {
printk(KERN_INFO " [%d] [0x%x] 0x%x\n", j,
pced->addr, pced->data);
pced++;
}
}
iovec++;
}
printk(KERN_INFO "==============================================\n");
printk(KERN_INFO "PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
printk(KERN_INFO "==============================================\n");
iovec = pce_dev->ce_sps.out_transfer.iovec;
for (i = 0; i < pce_dev->ce_sps.out_transfer.iovec_count; i++) {
printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
iovec->addr, iovec->size, iovec->flags);
iovec++;
}
}
#else
static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
{
}
#endif
static void _qce_dump_descr_fifos_fail(struct qce_device *pce_dev)
{
int i, j, ents;
struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec;
uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
printk(KERN_INFO "==============================================\n");
printk(KERN_INFO "CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
printk(KERN_INFO "==============================================\n");
for (i = 0; i < pce_dev->ce_sps.in_transfer.iovec_count; i++) {
printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
iovec->addr, iovec->size, iovec->flags);
if (iovec->flags & cmd_flags) {
struct sps_command_element *pced;
pced = (struct sps_command_element *)
(GET_VIRT_ADDR(iovec->addr));
ents = iovec->size/(sizeof(struct sps_command_element));
for (j = 0; j < ents; j++) {
printk(KERN_INFO " [%d] [0x%x] 0x%x\n", j,
pced->addr, pced->data);
pced++;
}
}
iovec++;
}
printk(KERN_INFO "==============================================\n");
printk(KERN_INFO "PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
printk(KERN_INFO "==============================================\n");
iovec = pce_dev->ce_sps.out_transfer.iovec;
for (i = 0; i < pce_dev->ce_sps.out_transfer.iovec_count; i++) {
printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
iovec->addr, iovec->size, iovec->flags);
iovec++;
}
}
static void _qce_sps_iovec_count_init(struct qce_device *pce_dev)
{
pce_dev->ce_sps.in_transfer.iovec_count = 0;
pce_dev->ce_sps.out_transfer.iovec_count = 0;
}
static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
{
struct sps_iovec *iovec = sps_bam_pipe->iovec +
(sps_bam_pipe->iovec_count - 1);
iovec->flags |= flag;
}
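/*
 * Append a single data descriptor (address/length) to the given SPS
 * transfer, failing if the descriptor FIFO is already full.
 */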
static int _qce_sps_add_data(uint32_t addr, uint32_t len,
struct sps_transfer *sps_bam_pipe)
{
struct sps_iovec *iovec = sps_bam_pipe->iovec +
sps_bam_pipe->iovec_count;
if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
		pr_err("Num of descriptors %d exceeds max (%d)",
sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
return -ENOMEM;
}
if (len) {
iovec->size = len;
iovec->addr = addr;
iovec->flags = 0;
sps_bam_pipe->iovec_count++;
}
return 0;
}
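/*
 * Walk a scatterlist and append one or more data descriptors per
 * entry to the given SPS transfer, splitting entries larger than
 * SPS_MAX_PKT_SIZE and aligning lengths to the CE burst size on
 * minor version 0 hardware.
 */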
static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
struct scatterlist *sg_src, uint32_t nbytes,
struct sps_transfer *sps_bam_pipe)
{
uint32_t addr, data_cnt, len;
struct sps_iovec *iovec = sps_bam_pipe->iovec +
sps_bam_pipe->iovec_count;
while (nbytes > 0) {
len = min(nbytes, sg_dma_len(sg_src));
nbytes -= len;
addr = sg_dma_address(sg_src);
if (pce_dev->ce_sps.minor_version == 0)
len = ALIGN(len, pce_dev->ce_sps.ce_burst_size);
while (len > 0) {
if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
				pr_err("Num of descriptors %d exceeds max (%d)",
sps_bam_pipe->iovec_count,
(uint32_t)QCE_MAX_NUM_DSCR);
return -ENOMEM;
}
if (len > SPS_MAX_PKT_SIZE) {
data_cnt = SPS_MAX_PKT_SIZE;
iovec->size = data_cnt;
iovec->addr = addr;
iovec->flags = 0;
} else {
data_cnt = len;
iovec->size = data_cnt;
iovec->addr = addr;
iovec->flags = 0;
}
iovec++;
sps_bam_pipe->iovec_count++;
addr += data_cnt;
len -= data_cnt;
}
sg_src = scatterwalk_sg_next(sg_src);
}
return 0;
}
static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
struct qce_cmdlist_info *cmdptr,
struct sps_transfer *sps_bam_pipe)
{
struct sps_iovec *iovec = sps_bam_pipe->iovec +
sps_bam_pipe->iovec_count;
iovec->size = cmdptr->size;
iovec->addr = GET_PHYS_ADDR(cmdptr->cmdlist);
iovec->flags = SPS_IOVEC_FLAG_CMD | flag;
sps_bam_pipe->iovec_count++;
return 0;
}
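/*
 * Submit the accumulated descriptor lists to the consumer and
 * producer pipes; on a consumer-side failure the descriptor FIFOs
 * are dumped to aid debugging.
 */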
static int _qce_sps_transfer(struct qce_device *pce_dev)
{
int rc = 0;
_qce_dump_descr_fifos(pce_dev);
rc = sps_transfer(pce_dev->ce_sps.consumer.pipe,
&pce_dev->ce_sps.in_transfer);
if (rc) {
pr_err("sps_xfr() fail (consumer pipe=0x%x) rc = %d,",
(u32)pce_dev->ce_sps.consumer.pipe, rc);
_qce_dump_descr_fifos_fail(pce_dev);
return rc;
}
rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.out_transfer);
if (rc) {
pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,",
(u32)pce_dev->ce_sps.producer.pipe, rc);
return rc;
}
return rc;
}
/**
* Allocate and Connect a CE peripheral's SPS endpoint
*
 * This function allocates an endpoint context and
 * connects it to a memory endpoint by calling the
 * appropriate SPS driver APIs.
 *
 * It also registers an SPS callback function with
 * the SPS driver.
*
* This function should only be called once typically
* during driver probe.
*
* @pce_dev - Pointer to qce_device structure
* @ep - Pointer to sps endpoint data structure
 * @is_producer - 1 means Producer endpoint
 *                0 means Consumer endpoint
*
* @return - 0 if successful else negative value.
*
*/
static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
struct qce_sps_ep_conn_data *ep,
bool is_producer)
{
int rc = 0;
struct sps_pipe *sps_pipe_info;
struct sps_connect *sps_connect_info = &ep->connect;
struct sps_register_event *sps_event = &ep->event;
/* Allocate endpoint context */
sps_pipe_info = sps_alloc_endpoint();
if (!sps_pipe_info) {
pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
is_producer);
rc = -ENOMEM;
goto out;
}
/* Now save the sps pipe handle */
ep->pipe = sps_pipe_info;
/* Get default connection configuration for an endpoint */
rc = sps_get_config(sps_pipe_info, sps_connect_info);
if (rc) {
pr_err("sps_get_config() fail pipe_handle=0x%x, rc = %d\n",
(u32)sps_pipe_info, rc);
goto get_config_err;
}
/* Modify the default connection configuration */
if (is_producer) {
/*
		 * For a CE producer transfer, the source should be the
		 * CE peripheral whereas the destination should be
		 * system memory.
*/
sps_connect_info->source = pce_dev->ce_sps.bam_handle;
sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
/* Producer pipe will handle this connection */
sps_connect_info->mode = SPS_MODE_SRC;
sps_connect_info->options =
SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
} else {
		/*
		 * For a CE consumer transfer, the source should be
		 * system memory whereas the destination should be the
		 * CE peripheral.
		 */
sps_connect_info->source = SPS_DEV_HANDLE_MEM;
sps_connect_info->destination = pce_dev->ce_sps.bam_handle;
sps_connect_info->mode = SPS_MODE_DEST;
sps_connect_info->options =
SPS_O_AUTO_ENABLE | SPS_O_EOT;
}
/* Producer pipe index */
sps_connect_info->src_pipe_index = pce_dev->ce_sps.src_pipe_index;
/* Consumer pipe index */
sps_connect_info->dest_pipe_index = pce_dev->ce_sps.dest_pipe_index;
/* Set pipe group */
sps_connect_info->lock_group = pce_dev->ce_sps.pipe_pair_index;
sps_connect_info->event_thresh = 0x10;
/*
	 * The maximum number of scatter/gather buffers that can be
	 * passed by the block layer is 32 (NR_SG). Each BAM descriptor
	 * needs 64 bits (8 bytes), and one BAM descriptor is required
	 * per buffer transfer, so a total of 256 (32 * 8) bytes of
	 * descriptor FIFO would be needed. Due to a HW limitation we
	 * must allocate at least one extra descriptor entry
	 * (256 bytes + 8 bytes), so to stay at a power of 2 we
	 * allocate 512 bytes of memory.
*/
sps_connect_info->desc.size = QCE_MAX_NUM_DSCR *
sizeof(struct sps_iovec);
sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
sps_connect_info->desc.size,
&sps_connect_info->desc.phys_base,
GFP_KERNEL);
if (sps_connect_info->desc.base == NULL) {
rc = -ENOMEM;
pr_err("Can not allocate coherent memory for sps data\n");
goto get_config_err;
}
memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
/* Establish connection between peripheral and memory endpoint */
rc = sps_connect(sps_pipe_info, sps_connect_info);
if (rc) {
pr_err("sps_connect() fail pipe_handle=0x%x, rc = %d\n",
(u32)sps_pipe_info, rc);
goto sps_connect_err;
}
sps_event->mode = SPS_TRIGGER_CALLBACK;
if (is_producer)
sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
else
sps_event->options = SPS_O_EOT;
sps_event->xfer_done = NULL;
sps_event->user = (void *)pce_dev;
pr_debug("success, %s : pipe_handle=0x%x, desc fifo base (phy) = 0x%x\n",
is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
(u32)sps_pipe_info, sps_connect_info->desc.phys_base);
goto out;
sps_connect_err:
dma_free_coherent(pce_dev->pdev,
sps_connect_info->desc.size,
sps_connect_info->desc.base,
sps_connect_info->desc.phys_base);
get_config_err:
sps_free_endpoint(sps_pipe_info);
out:
return rc;
}
/**
* Disconnect and Deallocate a CE peripheral's SPS endpoint
*
 * This function disconnects the endpoint and deallocates
 * the endpoint context.
*
* This function should only be called once typically
* during driver remove.
*
* @pce_dev - Pointer to qce_device structure
* @ep - Pointer to sps endpoint data structure
*
*/
static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
struct qce_sps_ep_conn_data *ep)
{
struct sps_pipe *sps_pipe_info = ep->pipe;
struct sps_connect *sps_connect_info = &ep->connect;
sps_disconnect(sps_pipe_info);
dma_free_coherent(pce_dev->pdev,
sps_connect_info->desc.size,
sps_connect_info->desc.base,
sps_connect_info->desc.phys_base);
sps_free_endpoint(sps_pipe_info);
}
/**
* Initialize SPS HW connected with CE core
*
 * This function registers the BAM HW resources with the
 * SPS driver and then initializes the two SPS endpoints.
*
* This function should only be called once typically
* during driver probe.
*
* @pce_dev - Pointer to qce_device structure
*
* @return - 0 if successful else negative value.
*
*/
static int qce_sps_init(struct qce_device *pce_dev)
{
int rc = 0;
struct sps_bam_props bam = {0};
bool register_bam = false;
bam.phys_addr = pce_dev->ce_sps.bam_mem;
bam.virt_addr = pce_dev->ce_sps.bam_iobase;
/*
	 * This event threshold value is only significant for BAM-to-BAM
	 * transfers. It is ignored for BAM-to-System mode transfers.
*/
bam.event_threshold = 0x10; /* Pipe event threshold */
/*
	 * This threshold controls when the BAM publishes the
	 * descriptor size on the sideband interface. The SPS HW is
	 * only used when the data transfer size is > 64 bytes.
*/
bam.summing_threshold = 64;
	/* The SPS driver will handle the crypto BAM IRQ */
bam.irq = (u32)pce_dev->ce_sps.bam_irq;
/*
* Set flag to indicate BAM global device control is managed
* remotely.
*/
if ((pce_dev->support_cmd_dscr == false) || (pce_dev->is_shared))
bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
else
bam.manage = SPS_BAM_MGR_LOCAL;
bam.ee = 1;
pr_debug("bam physical base=0x%x\n", (u32)bam.phys_addr);
pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);
mutex_lock(&bam_register_cnt);
if (ce_bam_registered == false) {
bam_registry.handle = 0;
bam_registry.cnt = 0;
}
if ((bam_registry.handle == 0) && (bam_registry.cnt == 0)) {
/* Register CE Peripheral BAM device to SPS driver */
rc = sps_register_bam_device(&bam, &bam_registry.handle);
if (rc) {
mutex_unlock(&bam_register_cnt);
pr_err("sps_register_bam_device() failed! err=%d", rc);
return -EIO;
}
bam_registry.cnt++;
register_bam = true;
ce_bam_registered = true;
} else {
bam_registry.cnt++;
}
mutex_unlock(&bam_register_cnt);
pce_dev->ce_sps.bam_handle = bam_registry.handle;
pr_debug("BAM device registered. bam_handle=0x%x",
pce_dev->ce_sps.bam_handle);
rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.producer, true);
if (rc)
goto sps_connect_producer_err;
rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.consumer, false);
if (rc)
goto sps_connect_consumer_err;
pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe;
pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe;
pr_info(" Qualcomm MSM CE-BAM at 0x%016llx irq %d\n",
(unsigned long long)pce_dev->ce_sps.bam_mem,
(unsigned int)pce_dev->ce_sps.bam_irq);
return rc;
sps_connect_consumer_err:
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
sps_connect_producer_err:
if (register_bam) {
mutex_lock(&bam_register_cnt);
sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
ce_bam_registered = false;
bam_registry.handle = 0;
bam_registry.cnt = 0;
mutex_unlock(&bam_register_cnt);
}
return rc;
}
/**
* De-initialize SPS HW connected with CE core
*
 * This function deinitializes the SPS endpoints and then
 * deregisters the BAM resources from the SPS driver.
*
* This function should only be called once typically
* during driver remove.
*
* @pce_dev - Pointer to qce_device structure
*
*/
static void qce_sps_exit(struct qce_device *pce_dev)
{
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.consumer);
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
mutex_lock(&bam_register_cnt);
if ((bam_registry.handle != 0) && (bam_registry.cnt == 1)) {
sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
bam_registry.cnt = 0;
bam_registry.handle = 0;
}
if ((bam_registry.handle != 0) && (bam_registry.cnt > 1))
bam_registry.cnt--;
mutex_unlock(&bam_register_cnt);
iounmap(pce_dev->ce_sps.bam_iobase);
}
static void _aead_sps_producer_callback(struct sps_event_notify *notify)
{
struct qce_device *pce_dev = (struct qce_device *)
((struct sps_event_notify *)notify)->user;
pce_dev->ce_sps.notify = *notify;
pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
notify->event_id,
notify->data.transfer.iovec.addr,
notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags);
if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
/* done */
_aead_complete(pce_dev);
} else {
int rc = 0;
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
pce_dev->ce_sps.out_transfer.iovec_count = 0;
_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
&pce_dev->ce_sps.out_transfer);
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.out_transfer);
if (rc) {
pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,",
(u32)pce_dev->ce_sps.producer.pipe, rc);
}
}
};
static void _sha_sps_producer_callback(struct sps_event_notify *notify)
{
struct qce_device *pce_dev = (struct qce_device *)
((struct sps_event_notify *)notify)->user;
pce_dev->ce_sps.notify = *notify;
pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
notify->event_id,
notify->data.transfer.iovec.addr,
notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags);
/* done */
_sha_complete(pce_dev);
};
static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify)
{
struct qce_device *pce_dev = (struct qce_device *)
((struct sps_event_notify *)notify)->user;
pce_dev->ce_sps.notify = *notify;
pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
notify->event_id,
notify->data.transfer.iovec.addr,
notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags);
if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
/* done */
_ablk_cipher_complete(pce_dev);
} else {
int rc = 0;
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
pce_dev->ce_sps.out_transfer.iovec_count = 0;
_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
&pce_dev->ce_sps.out_transfer);
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.out_transfer);
if (rc) {
pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,",
(u32)pce_dev->ce_sps.producer.pipe, rc);
}
}
};
static void qce_add_cmd_element(struct qce_device *pdev,
struct sps_command_element **cmd_ptr, u32 addr,
u32 data, struct sps_command_element **populate)
{
(*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
(*cmd_ptr)->data = data;
(*cmd_ptr)->mask = 0xFFFFFFFF;
if (populate != NULL)
*populate = *cmd_ptr;
	(*cmd_ptr)++;
}
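/*
 * Carve an AES cipher command list out of the supplied memory block
 * for the given mode (CBC/CTR/ECB/XTS) and key size, recording the
 * locations of the elements (keys, IV, segment registers) that get
 * patched per request.
 */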
static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev,
unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
bool key_128)
{
struct sps_command_element *ce_vaddr;
uint32_t ce_vaddr_start;
struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
struct qce_cmdlist_info *pcl_info = NULL;
int i = 0;
uint32_t encr_cfg = 0;
uint32_t key_reg = 0;
uint32_t xts_key_reg = 0;
uint32_t iv_reg = 0;
*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
pdev->ce_sps.ce_burst_size);
ce_vaddr = (struct sps_command_element *)(*pvaddr);
ce_vaddr_start = (uint32_t)(*pvaddr);
/*
* Designate chunks of the allocated memory to various
* command list pointers related to AES cipher operations defined
* in ce_cmdlistptrs_ops structure.
*/
switch (mode) {
case QCE_MODE_CBC:
case QCE_MODE_CTR:
if (key_128 == true) {
cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
if (mode == QCE_MODE_CBC)
encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
else
encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
iv_reg = 4;
key_reg = 4;
xts_key_reg = 0;
} else {
cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
if (mode == QCE_MODE_CBC)
encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
else
encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
iv_reg = 4;
key_reg = 8;
xts_key_reg = 0;
}
break;
case QCE_MODE_ECB:
if (key_128 == true) {
cmdlistptr->cipher_aes_128_ecb.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
iv_reg = 0;
key_reg = 4;
xts_key_reg = 0;
} else {
cmdlistptr->cipher_aes_256_ecb.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
iv_reg = 0;
key_reg = 8;
xts_key_reg = 0;
}
break;
case QCE_MODE_XTS:
if (key_128 == true) {
cmdlistptr->cipher_aes_128_xts.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_aes_128_xts);
encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
iv_reg = 4;
key_reg = 4;
xts_key_reg = 4;
} else {
cmdlistptr->cipher_aes_256_xts.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_aes_256_xts);
encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
iv_reg = 4;
key_reg = 8;
xts_key_reg = 8;
}
break;
default:
pr_err("Unknown mode of operation %d received, exiting now\n",
mode);
		return -EINVAL;
}
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
&pcl_info->seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
&pcl_info->encr_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
&pcl_info->encr_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
&pcl_info->encr_seg_start);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
(uint32_t)0xffffffff, &pcl_info->encr_mask);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
&pcl_info->auth_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
&pcl_info->encr_key);
for (i = 1; i < key_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
0, NULL);
if (xts_key_reg) {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
0, &pcl_info->encr_xts_key);
for (i = 1; i < xts_key_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_ENCR_XTS_KEY0_REG +
i * sizeof(uint32_t)), 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr,
CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
&pcl_info->encr_xts_du_size);
}
if (iv_reg) {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
&pcl_info->encr_cntr_iv);
for (i = 1; i < iv_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
0, NULL);
}
/* Add dummy to align size to burst-size multiple */
if (mode == QCE_MODE_XTS) {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
0, &pcl_info->auth_seg_size);
} else {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
0, &pcl_info->auth_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
0, &pcl_info->auth_seg_size);
}
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_le, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
&pcl_info->go_proc);
pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
*pvaddr = (unsigned char *) ce_vaddr;
return 0;
}
static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev,
unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
bool mode_cbc)
{
struct sps_command_element *ce_vaddr;
uint32_t ce_vaddr_start;
struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
struct qce_cmdlist_info *pcl_info = NULL;
int i = 0;
uint32_t encr_cfg = 0;
uint32_t key_reg = 0;
uint32_t iv_reg = 0;
*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
pdev->ce_sps.ce_burst_size);
ce_vaddr = (struct sps_command_element *)(*pvaddr);
ce_vaddr_start = (uint32_t)(*pvaddr);
/*
* Designate chunks of the allocated memory to various
* command list pointers related to cipher operations defined
* in ce_cmdlistptrs_ops structure.
*/
switch (alg) {
case CIPHER_ALG_DES:
if (mode_cbc) {
cmdlistptr->cipher_des_cbc.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_des_cbc);
encr_cfg = pdev->reg.encr_cfg_des_cbc;
iv_reg = 2;
key_reg = 2;
} else {
cmdlistptr->cipher_des_ecb.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_des_ecb);
encr_cfg = pdev->reg.encr_cfg_des_ecb;
iv_reg = 0;
key_reg = 2;
}
break;
case CIPHER_ALG_3DES:
if (mode_cbc) {
cmdlistptr->cipher_3des_cbc.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_3des_cbc);
encr_cfg = pdev->reg.encr_cfg_3des_cbc;
iv_reg = 2;
key_reg = 6;
} else {
cmdlistptr->cipher_3des_ecb.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->cipher_3des_ecb);
encr_cfg = pdev->reg.encr_cfg_3des_ecb;
iv_reg = 0;
key_reg = 6;
}
break;
default:
pr_err("Unknown algorithms %d received, exiting now\n", alg);
return -EINVAL;
break;
}
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
&pcl_info->seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
&pcl_info->encr_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
&pcl_info->encr_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
&pcl_info->encr_seg_start);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
&pcl_info->auth_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
&pcl_info->encr_key);
for (i = 1; i < key_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
0, NULL);
if (iv_reg) {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
&pcl_info->encr_cntr_iv);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
NULL);
}
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_le, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
&pcl_info->go_proc);
pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
*pvaddr = (unsigned char *) ce_vaddr;
return 0;
}
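/*
 * Build the canned command list for one authentication variant (plain
 * SHA1/SHA256, their HMAC forms, or AES CMAC with a 128/256 bit key).
 * Besides the status/configuration/segment setup, the list reserves the
 * auth IV, byte count and key registers that the per-request code patches
 * later.
 */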
static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
unsigned char **pvaddr, enum qce_hash_alg_enum alg,
bool key_128)
{
struct sps_command_element *ce_vaddr;
uint32_t ce_vaddr_start;
struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
struct qce_cmdlist_info *pcl_info = NULL;
int i = 0;
uint32_t key_reg = 0;
uint32_t auth_cfg = 0;
uint32_t iv_reg = 0;
*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
pdev->ce_sps.ce_burst_size);
ce_vaddr_start = (uint32_t)(*pvaddr);
ce_vaddr = (struct sps_command_element *)(*pvaddr);
/*
* Designate chunks of the allocated memory to various
* command list pointers related to authentication operations
* defined in ce_cmdlistptrs_ops structure.
*/
switch (alg) {
case QCE_HASH_SHA1:
cmdlistptr->auth_sha1.cmdlist = (uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->auth_sha1);
auth_cfg = pdev->reg.auth_cfg_sha1;
iv_reg = 5;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
break;
case QCE_HASH_SHA256:
cmdlistptr->auth_sha256.cmdlist = (uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->auth_sha256);
auth_cfg = pdev->reg.auth_cfg_sha256;
iv_reg = 8;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
/* 1 dummy write */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
0, NULL);
break;
case QCE_HASH_SHA1_HMAC:
cmdlistptr->auth_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->auth_sha1_hmac);
auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
key_reg = 16;
iv_reg = 5;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
break;
case QCE_HASH_SHA256_HMAC:
cmdlistptr->auth_sha256_hmac.cmdlist = (uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->auth_sha256_hmac);
auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
key_reg = 16;
iv_reg = 8;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
/* 1 dummy write */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
0, NULL);
break;
case QCE_HASH_AES_CMAC:
if (key_128 == true) {
cmdlistptr->auth_aes_128_cmac.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->auth_aes_128_cmac);
auth_cfg = pdev->reg.auth_cfg_cmac_128;
key_reg = 4;
} else {
cmdlistptr->auth_aes_256_cmac.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->auth_aes_256_cmac);
auth_cfg = pdev->reg.auth_cfg_cmac_256;
key_reg = 8;
}
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
/* 1 dummy write */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
0, NULL);
break;
default:
pr_err("Unknown algorithms %d received, exiting now\n", alg);
return -EINVAL;
break;
}
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
&pcl_info->seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
&pcl_info->encr_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
auth_cfg, &pcl_info->auth_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
&pcl_info->auth_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
&pcl_info->auth_seg_start);
if (alg == QCE_HASH_AES_CMAC) {
/* reset auth iv, bytecount and key registers */
for (i = 0; i < 16; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
0, NULL);
for (i = 0; i < 16; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
0, NULL);
} else {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
&pcl_info->auth_iv);
for (i = 1; i < iv_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
0, &pcl_info->auth_bytecount);
}
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
if (key_reg) {
qce_add_cmd_element(pdev, &ce_vaddr,
CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
for (i = 1; i < key_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
0, NULL);
}
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_le, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
&pcl_info->go_proc);
pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
*pvaddr = (unsigned char *) ce_vaddr;
return 0;
}
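/*
 * Build the canned command list for one HMAC-SHA1 based AEAD variant
 * (DES, 3DES or AES-128/256, in ECB or CBC mode).  Cipher key and IV
 * slots as well as the HMAC key, IV and byte count slots are laid out
 * here, so a request only needs to patch its values into a prebuilt list.
 */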
static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
unsigned char **pvaddr,
uint32_t alg,
uint32_t mode,
uint32_t key_size)
{
struct sps_command_element *ce_vaddr;
uint32_t ce_vaddr_start;
struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
struct qce_cmdlist_info *pcl_info = NULL;
uint32_t key_reg;
uint32_t iv_reg;
uint32_t i;
uint32_t enciv_in_word;
uint32_t encr_cfg;
*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
pdev->ce_sps.ce_burst_size);
ce_vaddr_start = (uint32_t)(*pvaddr);
ce_vaddr = (struct sps_command_element *)(*pvaddr);
switch (alg) {
case CIPHER_ALG_DES:
switch (mode) {
case QCE_MODE_ECB:
cmdlistptr->aead_hmac_sha1_ecb_des.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->aead_hmac_sha1_ecb_des);
encr_cfg = pdev->reg.encr_cfg_des_ecb;
break;
case QCE_MODE_CBC:
cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->aead_hmac_sha1_cbc_des);
encr_cfg = pdev->reg.encr_cfg_des_cbc;
break;
default:
return -EINVAL;
		}
enciv_in_word = 2;
break;
case CIPHER_ALG_3DES:
switch (mode) {
case QCE_MODE_ECB:
cmdlistptr->aead_hmac_sha1_ecb_3des.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->aead_hmac_sha1_ecb_3des);
encr_cfg = pdev->reg.encr_cfg_3des_ecb;
break;
case QCE_MODE_CBC:
cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->aead_hmac_sha1_cbc_3des);
encr_cfg = pdev->reg.encr_cfg_3des_cbc;
break;
default:
return -EINVAL;
		}
enciv_in_word = 2;
break;
case CIPHER_ALG_AES:
switch (mode) {
case QCE_MODE_ECB:
if (key_size == AES128_KEY_SIZE) {
cmdlistptr->aead_hmac_sha1_ecb_aes_128.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->
aead_hmac_sha1_ecb_aes_128);
encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
} else if (key_size == AES256_KEY_SIZE) {
cmdlistptr->aead_hmac_sha1_ecb_aes_256.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->
aead_hmac_sha1_ecb_aes_256);
encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
} else {
return -EINVAL;
}
break;
case QCE_MODE_CBC:
if (key_size == AES128_KEY_SIZE) {
cmdlistptr->aead_hmac_sha1_cbc_aes_128.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->
aead_hmac_sha1_cbc_aes_128);
encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
} else if (key_size == AES256_KEY_SIZE) {
cmdlistptr->aead_hmac_sha1_cbc_aes_256.cmdlist =
(uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->
aead_hmac_sha1_cbc_aes_256);
encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
} else {
return -EINVAL;
}
break;
default:
return -EINVAL;
		}
enciv_in_word = 4;
break;
default:
return -EINVAL;
	}
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
key_reg = key_size/sizeof(uint32_t);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
&pcl_info->encr_key);
for (i = 1; i < key_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
0, NULL);
if (mode != QCE_MODE_ECB) {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
&pcl_info->encr_cntr_iv);
for (i = 1; i < enciv_in_word; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
0, NULL);
	}
iv_reg = 5;
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
&pcl_info->auth_iv);
for (i = 1; i < iv_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
0, &pcl_info->auth_bytecount);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
&pcl_info->auth_key);
for (i = 1; i < key_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
&pcl_info->seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
&pcl_info->encr_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
&pcl_info->encr_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
&pcl_info->encr_seg_start);
qce_add_cmd_element(
pdev,
&ce_vaddr,
CRYPTO_AUTH_SEG_CFG_REG,
pdev->reg.auth_cfg_aead_sha1_hmac,
&pcl_info->auth_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
&pcl_info->auth_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
&pcl_info->auth_seg_start);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_le, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
&pcl_info->go_proc);
pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
*pvaddr = (unsigned char *) ce_vaddr;
return 0;
}
static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev,
unsigned char **pvaddr, bool key_128)
{
struct sps_command_element *ce_vaddr;
uint32_t ce_vaddr_start;
struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
struct qce_cmdlist_info *pcl_info = NULL;
int i = 0;
uint32_t encr_cfg = 0;
uint32_t auth_cfg = 0;
uint32_t key_reg = 0;
*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
pdev->ce_sps.ce_burst_size);
ce_vaddr_start = (uint32_t)(*pvaddr);
ce_vaddr = (struct sps_command_element *)(*pvaddr);
/*
* Designate chunks of the allocated memory to various
* command list pointers related to aead operations
* defined in ce_cmdlistptrs_ops structure.
*/
if (key_128 == true) {
cmdlistptr->aead_aes_128_ccm.cmdlist = (uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->aead_aes_128_ccm);
auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
key_reg = 4;
} else {
cmdlistptr->aead_aes_256_ccm.cmdlist = (uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->aead_aes_256_ccm);
auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
key_reg = 8;
}
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
&pcl_info->seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
encr_cfg, &pcl_info->encr_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
&pcl_info->encr_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
&pcl_info->encr_seg_start);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
(uint32_t)0xffffffff, &pcl_info->encr_mask);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
auth_cfg, &pcl_info->auth_seg_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
&pcl_info->auth_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
&pcl_info->auth_seg_start);
/* reset auth iv, bytecount and key registers */
for (i = 0; i < 8; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
0, NULL);
for (i = 0; i < 16; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
0, NULL);
/* set auth key */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
&pcl_info->auth_key);
for (i = 1; i < key_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
0, NULL);
/* set NONCE info */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
&pcl_info->auth_nonce_info);
for (i = 1; i < 4; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_AUTH_INFO_NONCE0_REG +
i * sizeof(uint32_t)), 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
&pcl_info->encr_key);
for (i = 1; i < key_reg; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
&pcl_info->encr_cntr_iv);
for (i = 1; i < 4; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
&pcl_info->encr_ccm_cntr_iv);
for (i = 1; i < 4; i++)
qce_add_cmd_element(pdev, &ce_vaddr,
(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_le, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
&pcl_info->go_proc);
pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
*pvaddr = (unsigned char *) ce_vaddr;
return 0;
}
static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
unsigned char **pvaddr)
{
struct sps_command_element *ce_vaddr;
uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
struct qce_cmdlist_info *pcl_info = NULL;
*pvaddr = (unsigned char *) ALIGN(((unsigned int)(*pvaddr)),
pdev->ce_sps.ce_burst_size);
ce_vaddr = (struct sps_command_element *)(*pvaddr);
cmdlistptr->unlock_all_pipes.cmdlist = (uint32_t)ce_vaddr;
pcl_info = &(cmdlistptr->unlock_all_pipes);
/*
* Designate chunks of the allocated memory to command list
* to unlock pipes.
*/
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
CRYPTO_CONFIG_RESET, NULL);
pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
*pvaddr = (unsigned char *) ce_vaddr;
return 0;
}
static int qce_setup_cmdlistptrs(struct qce_device *pdev,
unsigned char **pvaddr)
{
struct sps_command_element *ce_vaddr =
(struct sps_command_element *)(*pvaddr);
/*
* Designate chunks of the allocated memory to various
* command list pointers related to operations defined
* in ce_cmdlistptrs_ops structure.
*/
ce_vaddr =
(struct sps_command_element *) ALIGN(((unsigned int) ce_vaddr),
pdev->ce_sps.ce_burst_size);
*pvaddr = (unsigned char *) ce_vaddr;
_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, true);
_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, true);
_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, true);
_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, true);
_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, false);
_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, false);
_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, false);
_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, false);
_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, true);
_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, false);
_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, true);
_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, false);
_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1, false);
_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256, false);
_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1_HMAC, false);
_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256_HMAC, false);
_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, true);
_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, false);
_setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, QCE_MODE_CBC,
DES_KEY_SIZE);
_setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, QCE_MODE_ECB,
DES_KEY_SIZE);
_setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, QCE_MODE_CBC,
DES3_EDE_KEY_SIZE);
_setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, QCE_MODE_ECB,
DES3_EDE_KEY_SIZE);
_setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_CBC,
AES128_KEY_SIZE);
_setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_ECB,
AES128_KEY_SIZE);
_setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_CBC,
AES256_KEY_SIZE);
_setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_ECB,
AES256_KEY_SIZE);
_setup_aead_ccm_cmdlistptrs(pdev, pvaddr, true);
_setup_aead_ccm_cmdlistptrs(pdev, pvaddr, false);
_setup_unlock_pipe_cmdlistptrs(pdev, pvaddr);
return 0;
}
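/*
 * Carve up the coherent memory region allocated in qce_open(): the BAM
 * input and output iovec arrays come first, followed (when command
 * descriptors are supported) by all of the canned command lists, then the
 * result dump area and a small "ignore" buffer used to absorb pass-through
 * bytes.  If the layout overruns the allocation, the code panics, since
 * any later transfer would corrupt memory.
 */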
static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
{
unsigned char *vaddr;
vaddr = pce_dev->coh_vmem;
vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr),
pce_dev->ce_sps.ce_burst_size);
/* Allow for 256 descriptor (cmd and data) entries per pipe */
pce_dev->ce_sps.in_transfer.iovec = (struct sps_iovec *)vaddr;
pce_dev->ce_sps.in_transfer.iovec_phys =
(uint32_t)GET_PHYS_ADDR(vaddr);
vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec);
pce_dev->ce_sps.out_transfer.iovec = (struct sps_iovec *)vaddr;
pce_dev->ce_sps.out_transfer.iovec_phys =
(uint32_t)GET_PHYS_ADDR(vaddr);
vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec);
if (pce_dev->support_cmd_dscr)
qce_setup_cmdlistptrs(pce_dev, &vaddr);
vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr),
pce_dev->ce_sps.ce_burst_size);
pce_dev->ce_sps.result_dump = (uint32_t)vaddr;
pce_dev->ce_sps.result = (struct ce_result_dump_format *)vaddr;
vaddr += CRYPTO_RESULT_DUMP_SIZE;
pce_dev->ce_sps.ignore_buffer = (uint32_t)vaddr;
vaddr += pce_dev->ce_sps.ce_burst_size * 2;
if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize)
panic("qce50: Not enough coherent memory. Allocate %x , need %x",
pce_dev->memsize, vaddr - pce_dev->coh_vmem);
return 0;
}
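/*
 * Precompute the register values used on the request paths: the big and
 * little endian CRYPTO_CONFIG words (burst beats, interrupt masks, pipe
 * pair select), plus one encr_cfg/auth_cfg word per supported algorithm,
 * mode and key size, so per-request setup becomes a simple lookup.
 */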
static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
{
uint32_t beats = (pce_dev->ce_sps.ce_burst_size >> 3) - 1;
uint32_t pipe_pair = pce_dev->ce_sps.pipe_pair_index;
pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
(pipe_pair << CRYPTO_PIPE_SET_SELECT);
pce_dev->reg.crypto_cfg_le =
(pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
/* Initialize encr_cfg register for AES alg */
pce_dev->reg.encr_cfg_aes_cbc_128 =
(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_aes_cbc_256 =
(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_aes_ctr_128 =
(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_aes_ctr_256 =
(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_aes_xts_128 =
(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_aes_xts_256 =
(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_aes_ecb_128 =
(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_aes_ecb_256 =
(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_aes_ccm_128 =
(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
pce_dev->reg.encr_cfg_aes_ccm_256 =
(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
/* Initialize encr_cfg register for DES alg */
pce_dev->reg.encr_cfg_des_ecb =
(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_des_cbc =
(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_3des_ecb =
(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
pce_dev->reg.encr_cfg_3des_cbc =
(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
/* Initialize auth_cfg register for CMAC alg */
pce_dev->reg.auth_cfg_cmac_128 =
(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
pce_dev->reg.auth_cfg_cmac_256 =
(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
/* Initialize auth_cfg register for HMAC alg */
pce_dev->reg.auth_cfg_hmac_sha1 =
(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
pce_dev->reg.auth_cfg_hmac_sha256 =
(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
/* Initialize auth_cfg register for SHA1/256 alg */
pce_dev->reg.auth_cfg_sha1 =
(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
pce_dev->reg.auth_cfg_sha256 =
(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
/* Initialize auth_cfg register for AEAD alg */
pce_dev->reg.auth_cfg_aead_sha1_hmac =
(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
pce_dev->reg.auth_cfg_aead_sha256_hmac =
(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
pce_dev->reg.auth_cfg_aes_ccm_128 =
(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
pce_dev->reg.auth_cfg_aes_ccm_256 =
(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
return 0;
}
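/*
 * Handle an AES CCM AEAD request.  The cipher length, output length and
 * hardware MAC padding are derived from the direction, the scatterlists
 * are DMA mapped, and the BAM transfer is built either from associated
 * data plus payload in one stretch (crypto 5.0, which needs burst-aligned
 * descriptors) or with separate pass-through descriptors for the
 * associated data and the MAC padding (later minor versions).
 */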
static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
struct aead_request *areq = (struct aead_request *) q_req->areq;
uint32_t authsize = q_req->authsize;
uint32_t totallen_in, out_len;
uint32_t hw_pad_out = 0;
int rc = 0;
int ce_burst_size;
struct qce_cmdlist_info *cmdlistinfo = NULL;
ce_burst_size = pce_dev->ce_sps.ce_burst_size;
totallen_in = areq->cryptlen + areq->assoclen;
if (q_req->dir == QCE_ENCRYPT) {
q_req->cryptlen = areq->cryptlen;
out_len = areq->cryptlen + authsize;
hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
} else {
q_req->cryptlen = areq->cryptlen - authsize;
out_len = q_req->cryptlen;
hw_pad_out = authsize;
}
if (pce_dev->ce_sps.minor_version == 0) {
		/*
		 * Crypto 5.0 requires burst-size alignment for data
		 * descriptors, so the agent above (qcrypto) prepares the
		 * source scatter list with the associated data first,
		 * followed by the data stream to be ciphered.  The
		 * destination scatter list points at the same data area
		 * as the source.
		 */
pce_dev->src_nents = count_sg(areq->src, totallen_in);
} else {
pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
}
pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
pce_dev->authsize = q_req->authsize;
/* associated data input */
qce_dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
DMA_TO_DEVICE);
/* cipher input */
qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
/* cipher + mac output for encryption */
if (areq->src != areq->dst) {
if (pce_dev->ce_sps.minor_version == 0)
			/*
			 * The destination scatter list points at the same
			 * data area as the source.  Note that the associated
			 * data is passed through at the beginning of the
			 * destination area.
			 */
pce_dev->dst_nents = count_sg(areq->dst,
out_len + areq->assoclen);
else
pce_dev->dst_nents = count_sg(areq->dst, out_len);
qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
DMA_FROM_DEVICE);
} else {
pce_dev->dst_nents = pce_dev->src_nents;
}
if (pce_dev->support_cmd_dscr) {
_ce_get_cipher_cmdlistinfo(pce_dev, q_req, &cmdlistinfo);
/* set up crypto device */
rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
areq->assoclen, cmdlistinfo);
} else {
/* set up crypto device */
rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
areq->assoclen);
}
if (rc < 0)
goto bad;
/* setup for callback, and issue command to bam */
pce_dev->areq = q_req->areq;
pce_dev->qce_cb = q_req->qce_cb;
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback;
pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.producer.event);
if (rc) {
pr_err("Producer callback registration failed rc = %d\n", rc);
goto bad;
}
_qce_sps_iovec_count_init(pce_dev);
if (pce_dev->support_cmd_dscr)
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
if (pce_dev->ce_sps.minor_version == 0) {
if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
&pce_dev->ce_sps.in_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
/*
* The destination data should be big enough to
* include CCM padding.
*/
if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
areq->assoclen + hw_pad_out,
&pce_dev->ce_sps.out_transfer))
goto bad;
if (totallen_in > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer.event.options =
SPS_O_DESC_DONE;
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
if (_qce_sps_add_data(GET_PHYS_ADDR(
pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
&pce_dev->ce_sps.out_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
}
} else {
if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
&pce_dev->ce_sps.in_transfer))
goto bad;
if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
&pce_dev->ce_sps.in_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
/* Pass through to ignore associated data*/
if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
areq->assoclen,
&pce_dev->ce_sps.out_transfer))
goto bad;
if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
&pce_dev->ce_sps.out_transfer))
goto bad;
/* Pass through to ignore hw_pad (padding of the MAC data) */
if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
hw_pad_out, &pce_dev->ce_sps.out_transfer))
goto bad;
if (totallen_in > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
&pce_dev->ce_sps.out_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
}
}
rc = _qce_sps_transfer(pce_dev);
if (rc)
goto bad;
return 0;
bad:
if (pce_dev->assoc_nents) {
qce_dma_unmap_sg(pce_dev->pdev, areq->assoc,
pce_dev->assoc_nents, DMA_TO_DEVICE);
}
if (pce_dev->src_nents) {
qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
}
if (areq->src != areq->dst) {
qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
DMA_FROM_DEVICE);
}
return rc;
}
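/*
 * Entry point for AEAD requests.  CCM is handed off to
 * _qce_aead_ccm_req(); for the HMAC-SHA1 based modes the request is DMA
 * mapped, the engine is programmed from the prebuilt command list (or by
 * direct register writes on targets without command descriptors), and the
 * BAM transfer is queued with the producer callback armed for completion.
 */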
int qce_aead_req(void *handle, struct qce_req *q_req)
{
struct qce_device *pce_dev;
struct aead_request *areq;
uint32_t authsize;
struct crypto_aead *aead;
uint32_t ivsize;
uint32_t totallen;
int rc;
struct qce_cmdlist_info *cmdlistinfo = NULL;
if (q_req->mode == QCE_MODE_CCM)
return _qce_aead_ccm_req(handle, q_req);
pce_dev = (struct qce_device *) handle;
areq = (struct aead_request *) q_req->areq;
aead = crypto_aead_reqtfm(areq);
ivsize = crypto_aead_ivsize(aead);
q_req->ivsize = ivsize;
authsize = q_req->authsize;
if (q_req->dir == QCE_ENCRYPT)
q_req->cryptlen = areq->cryptlen;
else
q_req->cryptlen = areq->cryptlen - authsize;
totallen = q_req->cryptlen + areq->assoclen + ivsize;
if (pce_dev->support_cmd_dscr) {
cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev, q_req);
if (cmdlistinfo == NULL) {
pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
q_req->alg, q_req->mode, q_req->encklen,
q_req->authsize);
return -EINVAL;
}
/* set up crypto device */
rc = _ce_setup_aead(pce_dev, q_req, totallen,
areq->assoclen + ivsize, cmdlistinfo);
if (rc < 0)
return -EINVAL;
	}
pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
if (pce_dev->ce_sps.minor_version == 0) {
		/*
		 * Crypto 5.0 requires burst-size alignment for data
		 * descriptors, so the agent above (qcrypto) prepares the
		 * source scatter list with the associated data first,
		 * followed by the IV and then the data stream to be
		 * ciphered.
		 */
pce_dev->src_nents = count_sg(areq->src, totallen);
} else {
pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
	}
pce_dev->ivsize = q_req->ivsize;
pce_dev->authsize = q_req->authsize;
pce_dev->phy_iv_in = 0;
/* associated data input */
qce_dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
DMA_TO_DEVICE);
/* cipher input */
qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
/* cipher + mac output for encryption */
if (areq->src != areq->dst) {
if (pce_dev->ce_sps.minor_version == 0)
			/*
			 * The destination scatter list points at the same
			 * data area as the source.
			 */
pce_dev->dst_nents = count_sg(areq->dst, totallen);
else
pce_dev->dst_nents = count_sg(areq->dst,
q_req->cryptlen);
qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
DMA_FROM_DEVICE);
}
/* cipher iv for input */
if (pce_dev->ce_sps.minor_version != 0)
pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
ivsize, DMA_TO_DEVICE);
/* setup for callback, and issue command to bam */
pce_dev->areq = q_req->areq;
pce_dev->qce_cb = q_req->qce_cb;
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback;
pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.producer.event);
if (rc) {
pr_err("Producer callback registration failed rc = %d\n", rc);
goto bad;
}
_qce_sps_iovec_count_init(pce_dev);
if (pce_dev->support_cmd_dscr) {
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
} else {
rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
areq->assoclen + ivsize);
if (rc)
goto bad;
}
if (pce_dev->ce_sps.minor_version == 0) {
if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
&pce_dev->ce_sps.in_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
&pce_dev->ce_sps.out_transfer))
goto bad;
if (totallen > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer.event.options =
SPS_O_DESC_DONE;
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
if (_qce_sps_add_data(GET_PHYS_ADDR(
pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
&pce_dev->ce_sps.out_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
}
} else {
if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
&pce_dev->ce_sps.in_transfer))
goto bad;
if (_qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
&pce_dev->ce_sps.in_transfer))
goto bad;
if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
&pce_dev->ce_sps.in_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
/* Pass through to ignore associated + iv data*/
if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
(ivsize + areq->assoclen),
&pce_dev->ce_sps.out_transfer))
goto bad;
if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->cryptlen,
&pce_dev->ce_sps.out_transfer))
goto bad;
if (totallen > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
&pce_dev->ce_sps.out_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
}
}
rc = _qce_sps_transfer(pce_dev);
if (rc)
goto bad;
return 0;
bad:
if (pce_dev->assoc_nents) {
qce_dma_unmap_sg(pce_dev->pdev, areq->assoc,
pce_dev->assoc_nents, DMA_TO_DEVICE);
}
if (pce_dev->src_nents) {
qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
}
if (areq->src != areq->dst) {
qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
DMA_FROM_DEVICE);
}
if (pce_dev->phy_iv_in) {
dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
ivsize, DMA_TO_DEVICE);
}
return rc;
}
EXPORT_SYMBOL(qce_aead_req);
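/*
 * Entry point for ablkcipher (cipher only) requests: map the source and
 * destination scatterlists, preserve the last cipher block when crypto 5.0
 * performs an in-place CBC decryption, program the engine and queue the
 * BAM transfer.
 */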
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
int rc = 0;
struct qce_device *pce_dev = (struct qce_device *) handle;
struct ablkcipher_request *areq = (struct ablkcipher_request *)
c_req->areq;
struct qce_cmdlist_info *cmdlistinfo = NULL;
pce_dev->src_nents = 0;
pce_dev->dst_nents = 0;
/* cipher input */
pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
/* cipher output */
if (areq->src != areq->dst) {
pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
qce_dma_map_sg(pce_dev->pdev, areq->dst,
pce_dev->dst_nents, DMA_FROM_DEVICE);
} else {
pce_dev->dst_nents = pce_dev->src_nents;
}
pce_dev->dir = c_req->dir;
if ((pce_dev->ce_sps.minor_version == 0) && (c_req->dir == QCE_DECRYPT)
&& (c_req->mode == QCE_MODE_CBC)) {
memcpy(pce_dev->dec_iv, (unsigned char *)sg_virt(areq->src) +
areq->src->length - 16,
NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
}
/* set up crypto device */
if (pce_dev->support_cmd_dscr) {
_ce_get_cipher_cmdlistinfo(pce_dev, c_req, &cmdlistinfo);
rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0,
cmdlistinfo);
} else {
rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0);
}
if (rc < 0)
goto bad;
/* setup for client callback, and issue command to BAM */
pce_dev->areq = areq;
pce_dev->qce_cb = c_req->qce_cb;
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.producer.event.callback =
_ablk_cipher_sps_producer_callback;
pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.producer.event);
if (rc) {
pr_err("Producer callback registration failed rc = %d\n", rc);
goto bad;
}
_qce_sps_iovec_count_init(pce_dev);
if (pce_dev->support_cmd_dscr)
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
&pce_dev->ce_sps.in_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
&pce_dev->ce_sps.out_transfer))
goto bad;
if (areq->nbytes > SPS_MAX_PKT_SIZE) {
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
} else {
pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
if (_qce_sps_add_data(
GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
&pce_dev->ce_sps.out_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer,
SPS_IOVEC_FLAG_INT);
}
rc = _qce_sps_transfer(pce_dev);
if (rc)
goto bad;
return 0;
bad:
if (areq->src != areq->dst) {
if (pce_dev->dst_nents) {
qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
pce_dev->dst_nents, DMA_FROM_DEVICE);
}
}
if (pce_dev->src_nents) {
qce_dma_unmap_sg(pce_dev->pdev, areq->src,
pce_dev->src_nents,
(areq->src == areq->dst) ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
}
return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);
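/*
 * Entry point for hash and HMAC requests: map the source data, program
 * the engine from the prebuilt auth command list (or directly), and queue
 * a single BAM transfer whose completion is reported through
 * _sha_sps_producer_callback().
 */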
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
int rc;
struct ahash_request *areq = (struct ahash_request *)sreq->areq;
struct qce_cmdlist_info *cmdlistinfo = NULL;
pce_dev->src_nents = count_sg(sreq->src, sreq->size);
qce_dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
DMA_TO_DEVICE);
if (pce_dev->support_cmd_dscr) {
_ce_get_hash_cmdlistinfo(pce_dev, sreq, &cmdlistinfo);
rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
} else {
rc = _ce_setup_hash_direct(pce_dev, sreq);
}
if (rc < 0)
goto bad;
pce_dev->areq = areq;
pce_dev->qce_cb = sreq->qce_cb;
/* Register callback event for EOT (End of transfer) event. */
pce_dev->ce_sps.producer.event.callback = _sha_sps_producer_callback;
pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE;
rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
&pce_dev->ce_sps.producer.event);
if (rc) {
pr_err("Producer callback registration failed rc = %d\n", rc);
goto bad;
}
_qce_sps_iovec_count_init(pce_dev);
if (pce_dev->support_cmd_dscr)
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
&pce_dev->ce_sps.in_transfer);
if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
&pce_dev->ce_sps.in_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.in_transfer,
SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
if (_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
CRYPTO_RESULT_DUMP_SIZE,
&pce_dev->ce_sps.out_transfer))
goto bad;
_qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT);
rc = _qce_sps_transfer(pce_dev);
if (rc)
goto bad;
return 0;
bad:
if (pce_dev->src_nents) {
qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
pce_dev->src_nents, DMA_TO_DEVICE);
}
return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);
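/*
 * Parse the crypto engine's device tree node.  The properties consumed
 * here are the optional booleans qcom,ce-hw-shared and qcom,ce-hw-key,
 * the required qcom,bam-pipe-pair index, the "crypto-base" and
 * "crypto-bam-base" register regions and the BAM interrupt.  A sketch of
 * such a node (the compatible string, addresses and interrupt below are
 * made up for illustration, not taken from any particular target):
 *
 *	crypto@fd404000 {
 *		compatible = "qcom,qcrypto";
 *		reg = <0xfd404000 0x20000>, <0xfd400000 0x20000>;
 *		reg-names = "crypto-base", "crypto-bam-base";
 *		interrupts = <0 207 0>;
 *		qcom,bam-pipe-pair = <2>;
 *		qcom,ce-hw-shared;
 *	};
 */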
static int __qce_get_device_tree_data(struct platform_device *pdev,
struct qce_device *pce_dev)
{
struct resource *resource;
int rc = 0;
pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
"qcom,ce-hw-shared");
pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
"qcom,ce-hw-key");
if (of_property_read_u32((&pdev->dev)->of_node,
"qcom,bam-pipe-pair",
&pce_dev->ce_sps.pipe_pair_index)) {
pr_err("Fail to get bam pipe pair information.\n");
return -EINVAL;
} else {
pr_warn("bam_pipe_pair=0x%x", pce_dev->ce_sps.pipe_pair_index);
}
pce_dev->ce_sps.dest_pipe_index = 2 * pce_dev->ce_sps.pipe_pair_index;
pce_dev->ce_sps.src_pipe_index = pce_dev->ce_sps.dest_pipe_index + 1;
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"crypto-base");
if (resource) {
pce_dev->phy_iobase = resource->start;
pce_dev->iobase = ioremap_nocache(resource->start,
resource_size(resource));
if (!pce_dev->iobase) {
pr_err("Can not map CRYPTO io memory\n");
return -ENOMEM;
}
} else {
pr_err("CRYPTO HW mem unavailable.\n");
return -ENODEV;
}
pr_warn("ce_phy_reg_base=0x%x ", pce_dev->phy_iobase);
pr_warn("ce_virt_reg_base=0x%x\n", (uint32_t)pce_dev->iobase);
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"crypto-bam-base");
if (resource) {
pce_dev->ce_sps.bam_mem = resource->start;
pce_dev->ce_sps.bam_iobase = ioremap_nocache(resource->start,
resource_size(resource));
if (!pce_dev->ce_sps.bam_iobase) {
rc = -ENOMEM;
pr_err("Can not map BAM io memory\n");
goto err_getting_bam_info;
}
} else {
pr_err("CRYPTO BAM mem unavailable.\n");
rc = -ENODEV;
goto err_getting_bam_info;
}
pr_warn("ce_bam_phy_reg_base=0x%x ", pce_dev->ce_sps.bam_mem);
pr_warn("ce_bam_virt_reg_base=0x%x\n",
(uint32_t)pce_dev->ce_sps.bam_iobase);
resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (resource) {
pce_dev->ce_sps.bam_irq = resource->start;
pr_warn("CRYPTO BAM IRQ = %d.\n", pce_dev->ce_sps.bam_irq);
} else {
pr_err("CRYPTO BAM IRQ unavailable.\n");
goto err_dev;
}
return rc;
err_dev:
if (pce_dev->ce_sps.bam_iobase)
iounmap(pce_dev->ce_sps.bam_iobase);
err_getting_bam_info:
if (pce_dev->iobase)
iounmap(pce_dev->iobase);
return rc;
}
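/*
 * Look up the crypto engine clocks: the optional core source clock
 * (forced to 100 MHz when present), the core clock, the interface clock
 * and the bus clock.  Clocks acquired earlier are released again if a
 * required clock turns out to be missing.
 */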
static int __qce_init_clk(struct qce_device *pce_dev)
{
int rc = 0;
struct clk *ce_core_clk;
struct clk *ce_clk;
struct clk *ce_core_src_clk;
struct clk *ce_bus_clk;
/* Get CE3 src core clk. */
ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
if (!IS_ERR(ce_core_src_clk)) {
pce_dev->ce_core_src_clk = ce_core_src_clk;
/* Set the core src clk @100Mhz */
rc = clk_set_rate(pce_dev->ce_core_src_clk, 100000000);
if (rc) {
clk_put(pce_dev->ce_core_src_clk);
pce_dev->ce_core_src_clk = NULL;
pr_err("Unable to set the core src clk @100Mhz.\n");
goto err_clk;
}
} else {
pr_warn("Unable to get CE core src clk, set to NULL\n");
pce_dev->ce_core_src_clk = NULL;
}
/* Get CE core clk */
ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
if (IS_ERR(ce_core_clk)) {
rc = PTR_ERR(ce_core_clk);
pr_err("Unable to get CE core clk\n");
if (pce_dev->ce_core_src_clk != NULL)
clk_put(pce_dev->ce_core_src_clk);
goto err_clk;
}
pce_dev->ce_core_clk = ce_core_clk;
/* Get CE Interface clk */
ce_clk = clk_get(pce_dev->pdev, "iface_clk");
if (IS_ERR(ce_clk)) {
rc = PTR_ERR(ce_clk);
pr_err("Unable to get CE interface clk\n");
if (pce_dev->ce_core_src_clk != NULL)
clk_put(pce_dev->ce_core_src_clk);
clk_put(pce_dev->ce_core_clk);
goto err_clk;
}
pce_dev->ce_clk = ce_clk;
/* Get CE AXI clk */
ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
if (IS_ERR(ce_bus_clk)) {
rc = PTR_ERR(ce_bus_clk);
pr_err("Unable to get CE BUS interface clk\n");
if (pce_dev->ce_core_src_clk != NULL)
clk_put(pce_dev->ce_core_src_clk);
clk_put(pce_dev->ce_core_clk);
clk_put(pce_dev->ce_clk);
goto err_clk;
}
pce_dev->ce_bus_clk = ce_bus_clk;
err_clk:
if (rc)
pr_err("Unable to init CE clks, rc = %d\n", rc);
return rc;
}
static void __qce_deinit_clk(struct qce_device *pce_dev)
{
if (pce_dev->ce_clk != NULL) {
clk_put(pce_dev->ce_clk);
pce_dev->ce_clk = NULL;
}
if (pce_dev->ce_core_clk != NULL) {
clk_put(pce_dev->ce_core_clk);
pce_dev->ce_core_clk = NULL;
}
if (pce_dev->ce_bus_clk != NULL) {
clk_put(pce_dev->ce_bus_clk);
pce_dev->ce_bus_clk = NULL;
}
if (pce_dev->ce_core_src_clk != NULL) {
clk_put(pce_dev->ce_core_src_clk);
pce_dev->ce_core_src_clk = NULL;
}
}
int qce_enable_clk(void *handle)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
int rc = 0;
/* Enable CE core clk */
if (pce_dev->ce_core_clk != NULL) {
rc = clk_prepare_enable(pce_dev->ce_core_clk);
if (rc) {
pr_err("Unable to enable/prepare CE core clk\n");
return rc;
}
}
/* Enable CE clk */
if (pce_dev->ce_clk != NULL) {
rc = clk_prepare_enable(pce_dev->ce_clk);
if (rc) {
pr_err("Unable to enable/prepare CE iface clk\n");
clk_disable_unprepare(pce_dev->ce_core_clk);
return rc;
}
}
/* Enable AXI clk */
if (pce_dev->ce_bus_clk != NULL) {
rc = clk_prepare_enable(pce_dev->ce_bus_clk);
if (rc) {
pr_err("Unable to enable/prepare CE BUS clk\n");
clk_disable_unprepare(pce_dev->ce_clk);
clk_disable_unprepare(pce_dev->ce_core_clk);
return rc;
}
}
return rc;
}
EXPORT_SYMBOL(qce_enable_clk);
int qce_disable_clk(void *handle)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
int rc = 0;
if (pce_dev->ce_clk != NULL)
clk_disable_unprepare(pce_dev->ce_clk);
if (pce_dev->ce_core_clk != NULL)
clk_disable_unprepare(pce_dev->ce_core_clk);
if (pce_dev->ce_bus_clk != NULL)
clk_disable_unprepare(pce_dev->ce_bus_clk);
return rc;
}
EXPORT_SYMBOL(qce_disable_clk);
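/*
 * Illustrative call sequence for a client of this engine (a sketch only;
 * the variable names below are not taken from any real caller):
 *
 *	int rc;
 *	void *qce = qce_open(pdev, &rc);
 *
 *	if (qce == NULL)
 *		return rc;
 *	qce_enable_clk(qce);
 *	... fill in a struct qce_req or qce_sha_req and submit it with
 *	qce_ablk_cipher_req(), qce_aead_req() or qce_process_sha_req();
 *	completion is reported through the request's qce_cb callback ...
 *	qce_disable_clk(qce);
 *	qce_close(qce);
 */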
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
struct qce_device *pce_dev;
	uint32_t bam_cfg = 0;
pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
if (!pce_dev) {
*rc = -ENOMEM;
pr_err("Can not allocate memory: %d\n", *rc);
return NULL;
}
pce_dev->pdev = &pdev->dev;
if (pdev->dev.of_node) {
*rc = __qce_get_device_tree_data(pdev, pce_dev);
if (*rc)
goto err_pce_dev;
} else {
*rc = -EINVAL;
pr_err("Device Node not found.\n");
goto err_pce_dev;
}
pce_dev->memsize = 9 * PAGE_SIZE;
pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
if (pce_dev->coh_vmem == NULL) {
*rc = -ENOMEM;
pr_err("Can not allocate coherent memory for sps data\n");
goto err_iobase;
}
*rc = __qce_init_clk(pce_dev);
if (*rc)
goto err_mem;
*rc = qce_enable_clk(pce_dev);
if (*rc)
goto err;
if (_probe_ce_engine(pce_dev)) {
*rc = -ENXIO;
goto err;
}
*rc = 0;
bam_cfg = readl_relaxed(pce_dev->ce_sps.bam_iobase +
CRYPTO_BAM_CNFG_BITS_REG);
pce_dev->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
true : false;
qce_init_ce_cfg_val(pce_dev);
qce_setup_ce_sps_data(pce_dev);
qce_sps_init(pce_dev);
qce_disable_clk(pce_dev);
return pce_dev;
err:
__qce_deinit_clk(pce_dev);
err_mem:
if (pce_dev->coh_vmem)
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
err_iobase:
if (pce_dev->ce_sps.bam_iobase)
iounmap(pce_dev->ce_sps.bam_iobase);
if (pce_dev->iobase)
iounmap(pce_dev->iobase);
err_pce_dev:
kfree(pce_dev);
return NULL;
}
EXPORT_SYMBOL(qce_open);
/* crypto engine close function. */
int qce_close(void *handle)
{
struct qce_device *pce_dev = (struct qce_device *) handle;
if (handle == NULL)
return -ENODEV;
qce_enable_clk(pce_dev);
qce_sps_exit(pce_dev);
if (pce_dev->iobase)
iounmap(pce_dev->iobase);
if (pce_dev->coh_vmem)
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
qce_disable_clk(pce_dev);
__qce_deinit_clk(pce_dev);
kfree(handle);
return 0;
}
EXPORT_SYMBOL(qce_close);
int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
struct qce_device *pce_dev = (struct qce_device *)handle;
if (ce_support == NULL)
return -EINVAL;
ce_support->sha1_hmac_20 = false;
ce_support->sha1_hmac = false;
ce_support->sha256_hmac = false;
ce_support->sha_hmac = true;
ce_support->cmac = true;
ce_support->aes_key_192 = false;
ce_support->aes_xts = true;
ce_support->ota = false;
ce_support->bam = true;
ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
ce_support->hw_key = pce_dev->support_hw_key;
ce_support->aes_ccm = true;
if (pce_dev->ce_sps.minor_version)
ce_support->aligned_only = false;
else
ce_support->aligned_only = true;
return 0;
}
EXPORT_SYMBOL(qce_hw_support);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Crypto Engine driver");
| MattCrystal/tripping-hipster | drivers/crypto/msm/qce50.c | C | gpl-2.0 | 128,263 |
/* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2005 Wim Taymans <wim@fluendo.com>
*
* gstaudiosink.c: simple audio sink base class
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
/**
* SECTION:gstaudiosink
* @short_description: Simple base class for audio sinks
* @see_also: #GstAudioBaseSink, #GstAudioRingBuffer, #GstAudioSink.
*
* This is the most simple base class for audio sinks that only requires
* subclasses to implement a set of simple functions:
*
* <variablelist>
* <varlistentry>
* <term>open()</term>
* <listitem><para>Open the device.</para></listitem>
* </varlistentry>
* <varlistentry>
* <term>prepare()</term>
* <listitem><para>Configure the device with the specified format.</para></listitem>
* </varlistentry>
* <varlistentry>
* <term>write()</term>
* <listitem><para>Write samples to the device.</para></listitem>
* </varlistentry>
* <varlistentry>
* <term>reset()</term>
* <listitem><para>Unblock writes and flush the device.</para></listitem>
* </varlistentry>
* <varlistentry>
* <term>delay()</term>
* <listitem><para>Get the number of samples written but not yet played
* by the device.</para></listitem>
* </varlistentry>
* <varlistentry>
* <term>unprepare()</term>
* <listitem><para>Undo operations done by prepare.</para></listitem>
* </varlistentry>
* <varlistentry>
* <term>close()</term>
* <listitem><para>Close the device.</para></listitem>
* </varlistentry>
* </variablelist>
*
* All scheduling of samples and timestamps is done in this base class
* together with #GstAudioBaseSink using a default implementation of a
* #GstAudioRingBuffer that uses threads.
*/
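/*
 * Illustrative subclass sketch only (hypothetical MyNullSink type and
 * my_null_sink_* symbols, compiled out with #if 0): it shows the minimal
 * set of vmethods a GstAudioSink subclass fills in -- prepare(), write()
 * and delay() -- here as a sink that simply discards all samples.
 */
#if 0
typedef struct _MyNullSink
{
  GstAudioSink parent;
} MyNullSink;

typedef struct _MyNullSinkClass
{
  GstAudioSinkClass parent_class;
} MyNullSinkClass;

G_DEFINE_TYPE (MyNullSink, my_null_sink, GST_TYPE_AUDIO_SINK);

static gboolean
my_null_sink_prepare (GstAudioSink * sink, GstAudioRingBufferSpec * spec)
{
  /* a real sink would configure the device for @spec here */
  return TRUE;
}

static gint
my_null_sink_write (GstAudioSink * sink, gpointer data, guint length)
{
  /* pretend the device consumed everything */
  return length;
}

static guint
my_null_sink_delay (GstAudioSink * sink)
{
  /* nothing is ever queued in the (non-existent) device */
  return 0;
}

static void
my_null_sink_class_init (MyNullSinkClass * klass)
{
  GstAudioSinkClass *sink_class = GST_AUDIO_SINK_CLASS (klass);

  sink_class->prepare = GST_DEBUG_FUNCPTR (my_null_sink_prepare);
  sink_class->write = GST_DEBUG_FUNCPTR (my_null_sink_write);
  sink_class->delay = GST_DEBUG_FUNCPTR (my_null_sink_delay);
}

static void
my_null_sink_init (MyNullSink * sink)
{
}
#endif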
#include <string.h>
#include <gst/audio/audio.h>
#include "gstaudiosink.h"
GST_DEBUG_CATEGORY_STATIC (gst_audio_sink_debug);
#define GST_CAT_DEFAULT gst_audio_sink_debug
#define GST_TYPE_AUDIO_SINK_RING_BUFFER \
(gst_audio_sink_ring_buffer_get_type())
#define GST_AUDIO_SINK_RING_BUFFER(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AUDIO_SINK_RING_BUFFER,GstAudioSinkRingBuffer))
#define GST_AUDIO_SINK_RING_BUFFER_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_AUDIO_SINK_RING_BUFFER,GstAudioSinkRingBufferClass))
#define GST_AUDIO_SINK_RING_BUFFER_GET_CLASS(obj) \
(G_TYPE_INSTANCE_GET_CLASS ((obj), GST_TYPE_AUDIO_SINK_RING_BUFFER, GstAudioSinkRingBufferClass))
#define GST_AUDIO_SINK_RING_BUFFER_CAST(obj) \
((GstAudioSinkRingBuffer *)obj)
#define GST_IS_AUDIO_SINK_RING_BUFFER(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AUDIO_SINK_RING_BUFFER))
#define GST_IS_AUDIO_SINK_RING_BUFFER_CLASS(klass)\
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_AUDIO_SINK_RING_BUFFER))
typedef struct _GstAudioSinkRingBuffer GstAudioSinkRingBuffer;
typedef struct _GstAudioSinkRingBufferClass GstAudioSinkRingBufferClass;
#define GST_AUDIO_SINK_RING_BUFFER_GET_COND(buf) (&(((GstAudioSinkRingBuffer *)buf)->cond))
#define GST_AUDIO_SINK_RING_BUFFER_WAIT(buf) (g_cond_wait (GST_AUDIO_SINK_RING_BUFFER_GET_COND (buf), GST_OBJECT_GET_LOCK (buf)))
#define GST_AUDIO_SINK_RING_BUFFER_SIGNAL(buf) (g_cond_signal (GST_AUDIO_SINK_RING_BUFFER_GET_COND (buf)))
#define GST_AUDIO_SINK_RING_BUFFER_BROADCAST(buf)(g_cond_broadcast (GST_AUDIO_SINK_RING_BUFFER_GET_COND (buf)))
struct _GstAudioSinkRingBuffer
{
GstAudioRingBuffer object;
gboolean running;
gint queuedseg;
GCond cond;
};
struct _GstAudioSinkRingBufferClass
{
GstAudioRingBufferClass parent_class;
};
static void gst_audio_sink_ring_buffer_class_init (GstAudioSinkRingBufferClass *
klass);
static void gst_audio_sink_ring_buffer_init (GstAudioSinkRingBuffer *
ringbuffer, GstAudioSinkRingBufferClass * klass);
static void gst_audio_sink_ring_buffer_dispose (GObject * object);
static void gst_audio_sink_ring_buffer_finalize (GObject * object);
static GstAudioRingBufferClass *ring_parent_class = NULL;
static gboolean gst_audio_sink_ring_buffer_open_device (GstAudioRingBuffer *
buf);
static gboolean gst_audio_sink_ring_buffer_close_device (GstAudioRingBuffer *
buf);
static gboolean gst_audio_sink_ring_buffer_acquire (GstAudioRingBuffer * buf,
GstAudioRingBufferSpec * spec);
static gboolean gst_audio_sink_ring_buffer_release (GstAudioRingBuffer * buf);
static gboolean gst_audio_sink_ring_buffer_start (GstAudioRingBuffer * buf);
static gboolean gst_audio_sink_ring_buffer_pause (GstAudioRingBuffer * buf);
static gboolean gst_audio_sink_ring_buffer_stop (GstAudioRingBuffer * buf);
static guint gst_audio_sink_ring_buffer_delay (GstAudioRingBuffer * buf);
static gboolean gst_audio_sink_ring_buffer_activate (GstAudioRingBuffer * buf,
gboolean active);
/* ringbuffer abstract base class */
static GType
gst_audio_sink_ring_buffer_get_type (void)
{
static GType ringbuffer_type = 0;
if (!ringbuffer_type) {
static const GTypeInfo ringbuffer_info = {
sizeof (GstAudioSinkRingBufferClass),
NULL,
NULL,
(GClassInitFunc) gst_audio_sink_ring_buffer_class_init,
NULL,
NULL,
sizeof (GstAudioSinkRingBuffer),
0,
(GInstanceInitFunc) gst_audio_sink_ring_buffer_init,
NULL
};
ringbuffer_type =
g_type_register_static (GST_TYPE_AUDIO_RING_BUFFER,
"GstAudioSinkRingBuffer", &ringbuffer_info, 0);
}
return ringbuffer_type;
}
static void
gst_audio_sink_ring_buffer_class_init (GstAudioSinkRingBufferClass * klass)
{
GObjectClass *gobject_class;
GstAudioRingBufferClass *gstringbuffer_class;
gobject_class = (GObjectClass *) klass;
gstringbuffer_class = (GstAudioRingBufferClass *) klass;
ring_parent_class = g_type_class_peek_parent (klass);
gobject_class->dispose = gst_audio_sink_ring_buffer_dispose;
gobject_class->finalize = gst_audio_sink_ring_buffer_finalize;
gstringbuffer_class->open_device =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_open_device);
gstringbuffer_class->close_device =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_close_device);
gstringbuffer_class->acquire =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_acquire);
gstringbuffer_class->release =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_release);
gstringbuffer_class->start =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_start);
gstringbuffer_class->pause =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_pause);
gstringbuffer_class->resume =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_start);
gstringbuffer_class->stop =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_stop);
gstringbuffer_class->delay =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_delay);
gstringbuffer_class->activate =
GST_DEBUG_FUNCPTR (gst_audio_sink_ring_buffer_activate);
}
typedef gint (*WriteFunc) (GstAudioSink * sink, gpointer data, guint length);
/* this internal thread does nothing else but write samples to the audio device.
* It will write each segment in the ringbuffer and will update the play
* pointer.
* The start/stop methods control the thread.
*/
static void
audioringbuffer_thread_func (GstAudioRingBuffer * buf)
{
GstAudioSink *sink;
GstAudioSinkClass *csink;
GstAudioSinkRingBuffer *abuf = GST_AUDIO_SINK_RING_BUFFER_CAST (buf);
WriteFunc writefunc;
GstMessage *message;
GValue val = { 0 };
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
csink = GST_AUDIO_SINK_GET_CLASS (sink);
GST_DEBUG_OBJECT (sink, "enter thread");
GST_OBJECT_LOCK (abuf);
GST_DEBUG_OBJECT (sink, "signal wait");
GST_AUDIO_SINK_RING_BUFFER_SIGNAL (buf);
GST_OBJECT_UNLOCK (abuf);
writefunc = csink->write;
if (writefunc == NULL)
goto no_function;
message = gst_message_new_stream_status (GST_OBJECT_CAST (buf),
GST_STREAM_STATUS_TYPE_ENTER, GST_ELEMENT_CAST (sink));
g_value_init (&val, GST_TYPE_G_THREAD);
g_value_set_boxed (&val, sink->thread);
gst_message_set_stream_status_object (message, &val);
g_value_unset (&val);
GST_DEBUG_OBJECT (sink, "posting ENTER stream status");
gst_element_post_message (GST_ELEMENT_CAST (sink), message);
while (TRUE) {
gint left, len;
guint8 *readptr;
gint readseg;
/* buffer must be started */
if (gst_audio_ring_buffer_prepare_read (buf, &readseg, &readptr, &len)) {
gint written;
left = len;
do {
written = writefunc (sink, readptr, left);
        GST_LOG_OBJECT (sink, "transferred %d bytes of %d from segment %d",
written, left, readseg);
if (written < 0 || written > left) {
/* might not be critical, it e.g. happens when aborting playback */
GST_WARNING_OBJECT (sink,
"error writing data in %s (reason: %s), skipping segment (left: %d, written: %d)",
GST_DEBUG_FUNCPTR_NAME (writefunc),
(errno > 1 ? g_strerror (errno) : "unknown"), left, written);
break;
}
left -= written;
readptr += written;
} while (left > 0);
/* clear written samples */
gst_audio_ring_buffer_clear (buf, readseg);
/* we wrote one segment */
gst_audio_ring_buffer_advance (buf, 1);
} else {
GST_OBJECT_LOCK (abuf);
if (!abuf->running)
goto stop_running;
if (G_UNLIKELY (g_atomic_int_get (&buf->state) ==
GST_AUDIO_RING_BUFFER_STATE_STARTED)) {
GST_OBJECT_UNLOCK (abuf);
continue;
}
GST_DEBUG_OBJECT (sink, "signal wait");
GST_AUDIO_SINK_RING_BUFFER_SIGNAL (buf);
GST_DEBUG_OBJECT (sink, "wait for action");
GST_AUDIO_SINK_RING_BUFFER_WAIT (buf);
GST_DEBUG_OBJECT (sink, "got signal");
if (!abuf->running)
goto stop_running;
GST_DEBUG_OBJECT (sink, "continue running");
GST_OBJECT_UNLOCK (abuf);
}
}
/* Will never be reached */
g_assert_not_reached ();
return;
/* ERROR */
no_function:
{
GST_DEBUG_OBJECT (sink, "no write function, exit thread");
return;
}
stop_running:
{
GST_OBJECT_UNLOCK (abuf);
GST_DEBUG_OBJECT (sink, "stop running, exit thread");
message = gst_message_new_stream_status (GST_OBJECT_CAST (buf),
GST_STREAM_STATUS_TYPE_LEAVE, GST_ELEMENT_CAST (sink));
g_value_init (&val, GST_TYPE_G_THREAD);
g_value_set_boxed (&val, sink->thread);
gst_message_set_stream_status_object (message, &val);
g_value_unset (&val);
GST_DEBUG_OBJECT (sink, "posting LEAVE stream status");
gst_element_post_message (GST_ELEMENT_CAST (sink), message);
return;
}
}
static void
gst_audio_sink_ring_buffer_init (GstAudioSinkRingBuffer * ringbuffer,
GstAudioSinkRingBufferClass * g_class)
{
ringbuffer->running = FALSE;
ringbuffer->queuedseg = 0;
g_cond_init (&ringbuffer->cond);
}
static void
gst_audio_sink_ring_buffer_dispose (GObject * object)
{
G_OBJECT_CLASS (ring_parent_class)->dispose (object);
}
static void
gst_audio_sink_ring_buffer_finalize (GObject * object)
{
GstAudioSinkRingBuffer *ringbuffer = GST_AUDIO_SINK_RING_BUFFER_CAST (object);
g_cond_clear (&ringbuffer->cond);
G_OBJECT_CLASS (ring_parent_class)->finalize (object);
}
static gboolean
gst_audio_sink_ring_buffer_open_device (GstAudioRingBuffer * buf)
{
GstAudioSink *sink;
GstAudioSinkClass *csink;
gboolean result = TRUE;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
csink = GST_AUDIO_SINK_GET_CLASS (sink);
if (csink->open)
result = csink->open (sink);
if (!result)
goto could_not_open;
return result;
could_not_open:
{
GST_DEBUG_OBJECT (sink, "could not open device");
return FALSE;
}
}
static gboolean
gst_audio_sink_ring_buffer_close_device (GstAudioRingBuffer * buf)
{
GstAudioSink *sink;
GstAudioSinkClass *csink;
gboolean result = TRUE;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
csink = GST_AUDIO_SINK_GET_CLASS (sink);
if (csink->close)
result = csink->close (sink);
if (!result)
goto could_not_close;
return result;
could_not_close:
{
GST_DEBUG_OBJECT (sink, "could not close device");
return FALSE;
}
}
static gboolean
gst_audio_sink_ring_buffer_acquire (GstAudioRingBuffer * buf,
GstAudioRingBufferSpec * spec)
{
GstAudioSink *sink;
GstAudioSinkClass *csink;
gboolean result = FALSE;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
csink = GST_AUDIO_SINK_GET_CLASS (sink);
if (csink->prepare)
result = csink->prepare (sink, spec);
if (!result)
goto could_not_prepare;
/* set latency to one more segment as we need some headroom */
spec->seglatency = spec->segtotal + 1;
buf->size = spec->segtotal * spec->segsize;
buf->memory = g_malloc (buf->size);
if (buf->spec.type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_RAW) {
gst_audio_format_fill_silence (buf->spec.info.finfo, buf->memory,
buf->size);
} else {
/* FIXME, non-raw formats get 0 as the empty sample */
memset (buf->memory, 0, buf->size);
}
return TRUE;
/* ERRORS */
could_not_prepare:
{
GST_DEBUG_OBJECT (sink, "could not prepare device");
return FALSE;
}
}
static gboolean
gst_audio_sink_ring_buffer_activate (GstAudioRingBuffer * buf, gboolean active)
{
GstAudioSink *sink;
GstAudioSinkRingBuffer *abuf;
GError *error = NULL;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
abuf = GST_AUDIO_SINK_RING_BUFFER_CAST (buf);
if (active) {
abuf->running = TRUE;
GST_DEBUG_OBJECT (sink, "starting thread");
sink->thread = g_thread_try_new ("audiosink-ringbuffer",
(GThreadFunc) audioringbuffer_thread_func, buf, &error);
if (!sink->thread || error != NULL)
goto thread_failed;
GST_DEBUG_OBJECT (sink, "waiting for thread");
/* the object lock is taken */
GST_AUDIO_SINK_RING_BUFFER_WAIT (buf);
GST_DEBUG_OBJECT (sink, "thread is started");
} else {
abuf->running = FALSE;
GST_DEBUG_OBJECT (sink, "signal wait");
GST_AUDIO_SINK_RING_BUFFER_SIGNAL (buf);
GST_OBJECT_UNLOCK (buf);
/* join the thread */
g_thread_join (sink->thread);
GST_OBJECT_LOCK (buf);
}
return TRUE;
/* ERRORS */
thread_failed:
{
if (error)
GST_ERROR_OBJECT (sink, "could not create thread %s", error->message);
else
GST_ERROR_OBJECT (sink, "could not create thread for unknown reason");
g_clear_error (&error);
return FALSE;
}
}
/* function is called with LOCK */
static gboolean
gst_audio_sink_ring_buffer_release (GstAudioRingBuffer * buf)
{
GstAudioSink *sink;
GstAudioSinkClass *csink;
gboolean result = FALSE;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
csink = GST_AUDIO_SINK_GET_CLASS (sink);
/* free the buffer */
g_free (buf->memory);
buf->memory = NULL;
if (csink->unprepare)
result = csink->unprepare (sink);
if (!result)
goto could_not_unprepare;
GST_DEBUG_OBJECT (sink, "unprepared");
return result;
could_not_unprepare:
{
GST_DEBUG_OBJECT (sink, "could not unprepare device");
return FALSE;
}
}
static gboolean
gst_audio_sink_ring_buffer_start (GstAudioRingBuffer * buf)
{
GstAudioSink *sink;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
GST_DEBUG_OBJECT (sink, "start, sending signal");
GST_AUDIO_SINK_RING_BUFFER_SIGNAL (buf);
return TRUE;
}
static gboolean
gst_audio_sink_ring_buffer_pause (GstAudioRingBuffer * buf)
{
GstAudioSink *sink;
GstAudioSinkClass *csink;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
csink = GST_AUDIO_SINK_GET_CLASS (sink);
/* unblock any pending writes to the audio device */
if (csink->reset) {
GST_DEBUG_OBJECT (sink, "reset...");
csink->reset (sink);
GST_DEBUG_OBJECT (sink, "reset done");
}
return TRUE;
}
static gboolean
gst_audio_sink_ring_buffer_stop (GstAudioRingBuffer * buf)
{
GstAudioSink *sink;
GstAudioSinkClass *csink;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
csink = GST_AUDIO_SINK_GET_CLASS (sink);
/* unblock any pending writes to the audio device */
if (csink->reset) {
GST_DEBUG_OBJECT (sink, "reset...");
csink->reset (sink);
GST_DEBUG_OBJECT (sink, "reset done");
}
#if 0
if (abuf->running) {
GST_DEBUG_OBJECT (sink, "stop, waiting...");
GST_AUDIO_SINK_RING_BUFFER_WAIT (buf);
GST_DEBUG_OBJECT (sink, "stopped");
}
#endif
return TRUE;
}
static guint
gst_audio_sink_ring_buffer_delay (GstAudioRingBuffer * buf)
{
GstAudioSink *sink;
GstAudioSinkClass *csink;
guint res = 0;
sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
csink = GST_AUDIO_SINK_GET_CLASS (sink);
if (csink->delay)
res = csink->delay (sink);
return res;
}
/* AudioSink signals and args */
enum
{
/* FILL ME */
LAST_SIGNAL
};
enum
{
ARG_0,
};
#define _do_init \
GST_DEBUG_CATEGORY_INIT (gst_audio_sink_debug, "audiosink", 0, "audiosink element");
#define gst_audio_sink_parent_class parent_class
G_DEFINE_TYPE_WITH_CODE (GstAudioSink, gst_audio_sink,
GST_TYPE_AUDIO_BASE_SINK, _do_init);
static GstAudioRingBuffer *gst_audio_sink_create_ringbuffer (GstAudioBaseSink *
sink);
static void
gst_audio_sink_class_init (GstAudioSinkClass * klass)
{
GstAudioBaseSinkClass *gstaudiobasesink_class;
gstaudiobasesink_class = (GstAudioBaseSinkClass *) klass;
gstaudiobasesink_class->create_ringbuffer =
GST_DEBUG_FUNCPTR (gst_audio_sink_create_ringbuffer);
g_type_class_ref (GST_TYPE_AUDIO_SINK_RING_BUFFER);
}
static void
gst_audio_sink_init (GstAudioSink * audiosink)
{
}
static GstAudioRingBuffer *
gst_audio_sink_create_ringbuffer (GstAudioBaseSink * sink)
{
GstAudioRingBuffer *buffer;
GST_DEBUG_OBJECT (sink, "creating ringbuffer");
buffer = g_object_new (GST_TYPE_AUDIO_SINK_RING_BUFFER, NULL);
GST_DEBUG_OBJECT (sink, "created ringbuffer @%p", buffer);
return buffer;
}
| iperry/gst-plugins-base | gst-libs/gst/audio/gstaudiosink.c | C | gpl-2.0 | 18,574 |
/*
* kernel/workqueue.c - generic async execution with shared worker pool
*
* Copyright (C) 2002 Ingo Molnar
*
* Derived from the taskqueue/keventd code by:
* David Woodhouse <dwmw2@infradead.org>
* Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*
* Made to use alloc_percpu by Christoph Lameter.
*
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*
 * This is the generic async execution mechanism. Work items are
* executed in process context. The worker pool is shared and
* automatically managed. There is one worker pool for each CPU and
* one extra for works which are better served by workers which are
* not bound to any specific CPU.
*
* Please read Documentation/workqueue.txt for details.
*/
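/*
 * Illustrative consumer-side sketch only (hypothetical my_* names, compiled
 * out with #if 0): the usual pattern served by this file -- declare a work
 * item bound to a handler and queue it on the shared system workqueue.
 */
#if 0
static void my_work_fn(struct work_struct *work)
{
	pr_info("my_work_fn: running in process context on cpu %d\n",
		smp_processor_id());
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_kick_work(void)
{
	/* a dedicated queue would instead come from alloc_workqueue() and be
	 * torn down with destroy_workqueue() */
	queue_work(system_wq, &my_work);
}
#endif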
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/bug.h>
#include <linux/module.h>
#include "workqueue_sched.h"
enum {
/* global_cwq flags */
GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */
GCWQ_FREEZING = 1 << 1, /* freeze in progress */
/* pool flags */
POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */
/* worker flags */
WORKER_STARTED = 1 << 0, /* started */
WORKER_DIE = 1 << 1, /* die die die */
WORKER_IDLE = 1 << 2, /* is idle */
WORKER_PREP = 1 << 3, /* preparing to run works */
WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
WORKER_REBIND = 1 << 5, /* mom is home, come back */
WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
WORKER_UNBOUND = 1 << 7, /* worker is unbound */
WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
/* gcwq->trustee_state */
TRUSTEE_START = 0, /* start */
TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
TRUSTEE_BUTCHER = 2, /* butcher workers */
TRUSTEE_RELEASE = 3, /* release workers */
TRUSTEE_DONE = 4, /* trustee is done */
NR_WORKER_POOLS = 2, /* # worker pools per gcwq */
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
/* call for help after 10ms
(min two ticks) */
MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
/*
* Rescue workers are used only on emergencies and shared by
* all cpus. Give -20.
*/
RESCUER_NICE_LEVEL = -20,
HIGHPRI_NICE_LEVEL = -20,
};
/*
* Structure fields follow one of the following exclusion rules.
*
* I: Modifiable by initialization/destruction paths and read-only for
* everyone else.
*
* P: Preemption protected. Disabling preemption is enough and should
* only be modified and accessed from the local cpu.
*
* L: gcwq->lock protected. Access with gcwq->lock held.
*
* X: During normal operation, modification requires gcwq->lock and
* should be done only from local cpu. Either disabling preemption
* on local cpu or grabbing gcwq->lock is enough for read access.
* If GCWQ_DISASSOCIATED is set, it's identical to L.
*
* F: wq->flush_mutex protected.
*
* W: workqueue_lock protected.
*/
struct global_cwq;
struct worker_pool;
/*
* The poor guys doing the actual heavy lifting. All on-duty workers
* are either serving the manager role, on idle list or on busy hash.
*/
struct worker {
/* on idle list while idle, on busy hash table while busy */
union {
struct list_head entry; /* L: while idle */
struct hlist_node hentry; /* L: while busy */
};
struct work_struct *current_work; /* L: work being processed */
struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */
struct worker_pool *pool; /* I: the associated pool */
/* 64 bytes boundary on 64bit, 32 on 32bit */
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
struct work_struct rebind_work; /* L: rebind worker to cpu */
};
struct worker_pool {
struct global_cwq *gcwq; /* I: the owning gcwq */
unsigned int flags; /* X: flags */
struct list_head worklist; /* L: list of pending works */
int nr_workers; /* L: total number of workers */
int nr_idle; /* L: currently idle ones */
struct list_head idle_list; /* X: list of idle workers */
struct timer_list idle_timer; /* L: worker idle timeout */
struct timer_list mayday_timer; /* L: SOS timer for workers */
struct ida worker_ida; /* L: for worker IDs */
struct worker *first_idle; /* L: first idle worker */
};
/*
* Global per-cpu workqueue. There's one and only one for each cpu
* and all works are queued and processed here regardless of their
* target workqueues.
*/
struct global_cwq {
spinlock_t lock; /* the gcwq lock */
unsigned int cpu; /* I: the associated cpu */
unsigned int flags; /* L: GCWQ_* flags */
/* workers are chained either in busy_hash or pool idle_list */
struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
/* L: hash of busy workers */
struct worker_pool pools[2]; /* normal and highpri pools */
struct task_struct *trustee; /* L: for gcwq shutdown */
unsigned int trustee_state; /* L: trustee state */
wait_queue_head_t trustee_wait; /* trustee wait */
} ____cacheline_aligned_in_smp;
/*
* The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
* work_struct->data are used for flags and thus cwqs need to be
* aligned at two's power of the number of flag bits.
*/
struct cpu_workqueue_struct {
struct worker_pool *pool; /* I: the associated pool */
struct workqueue_struct *wq; /* I: the owning workqueue */
int work_color; /* L: current color */
int flush_color; /* L: flushing color */
int nr_in_flight[WORK_NR_COLORS];
/* L: nr of in_flight works */
int nr_active; /* L: nr of active works */
int max_active; /* L: max active works */
struct list_head delayed_works; /* L: delayed works */
};
/*
* Structure used to wait for workqueue flush.
*/
struct wq_flusher {
struct list_head list; /* F: list of flushers */
int flush_color; /* F: flush color waiting for */
struct completion done; /* flush completion */
};
/*
* All cpumasks are assumed to be always set on UP and thus can't be
* used to determine whether there's something to be done.
*/
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask) \
cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask) free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp) true
#define free_mayday_mask(mask) do { } while (0)
#endif
/*
* The externally visible workqueue abstraction is an array of
* per-CPU workqueues:
*/
struct workqueue_struct {
unsigned int flags; /* W: WQ_* flags */
union {
struct cpu_workqueue_struct __percpu *pcpu;
struct cpu_workqueue_struct *single;
unsigned long v;
} cpu_wq; /* I: cwq's */
struct list_head list; /* W: list of all workqueues */
struct mutex flush_mutex; /* protects wq flushing */
int work_color; /* F: current work color */
int flush_color; /* F: current flush color */
atomic_t nr_cwqs_to_flush; /* flush in progress */
struct wq_flusher *first_flusher; /* F: first flusher */
struct list_head flusher_queue; /* F: flush waiters */
struct list_head flusher_overflow; /* F: flush overflow list */
mayday_mask_t mayday_mask; /* cpus requesting rescue */
struct worker *rescuer; /* I: rescue worker */
int nr_drainers; /* W: drain in progress */
int saved_max_active; /* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
char name[]; /* I: workqueue name */
};
/* see the comment above the definition of WQ_POWER_EFFICIENT */
#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
static bool wq_power_efficient = true;
#else
static bool wq_power_efficient;
#endif
module_param_named(power_efficient, wq_power_efficient, bool, 0444);
struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
struct workqueue_struct *system_freezable_wq __read_mostly;
struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
struct workqueue_struct *system_power_efficient_wq __read_mostly;
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
EXPORT_SYMBOL_GPL(system_freezable_wq);
EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
#define for_each_worker_pool(pool, gcwq) \
for ((pool) = &(gcwq)->pools[0]; \
(pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
#define for_each_busy_worker(worker, i, pos, gcwq) \
for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
unsigned int sw)
{
if (cpu < nr_cpu_ids) {
if (sw & 1) {
cpu = cpumask_next(cpu, mask);
if (cpu < nr_cpu_ids)
return cpu;
}
if (sw & 2)
return WORK_CPU_UNBOUND;
}
return WORK_CPU_NONE;
}
static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
struct workqueue_struct *wq)
{
return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}
/*
* CPU iterators
*
* An extra gcwq is defined for an invalid cpu number
* (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
* specific CPU. The following iterators are similar to
* for_each_*_cpu() iterators but also considers the unbound gcwq.
*
* for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND
* for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND
* for_each_cwq_cpu() : possible CPUs for bound workqueues,
* WORK_CPU_UNBOUND for unbound workqueues
*/
#define for_each_gcwq_cpu(cpu) \
for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \
(cpu) < WORK_CPU_NONE; \
(cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
#define for_each_online_gcwq_cpu(cpu) \
for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \
(cpu) < WORK_CPU_NONE; \
(cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
#define for_each_cwq_cpu(cpu, wq) \
for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \
(cpu) < WORK_CPU_NONE; \
(cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
static void *work_debug_hint(void *addr)
{
return ((struct work_struct *) addr)->func;
}
/*
* fixup_init is called when:
* - an active object is initialized
*/
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
struct work_struct *work = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
cancel_work_sync(work);
debug_object_init(work, &work_debug_descr);
return 1;
default:
return 0;
}
}
/*
* fixup_activate is called when:
* - an active object is activated
* - an unknown object is activated (might be a statically initialized object)
*/
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
struct work_struct *work = addr;
switch (state) {
case ODEBUG_STATE_NOTAVAILABLE:
/*
* This is not really a fixup. The work struct was
* statically initialized. We just make sure that it
* is tracked in the object tracker.
*/
if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
debug_object_init(work, &work_debug_descr);
debug_object_activate(work, &work_debug_descr);
return 0;
}
WARN_ON_ONCE(1);
return 0;
case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
default:
return 0;
}
}
/*
* fixup_free is called when:
* - an active object is freed
*/
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
struct work_struct *work = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
cancel_work_sync(work);
debug_object_free(work, &work_debug_descr);
return 1;
default:
return 0;
}
}
static struct debug_obj_descr work_debug_descr = {
.name = "work_struct",
.debug_hint = work_debug_hint,
.fixup_init = work_fixup_init,
.fixup_activate = work_fixup_activate,
.fixup_free = work_fixup_free,
};
static inline void debug_work_activate(struct work_struct *work)
{
debug_object_activate(work, &work_debug_descr);
}
static inline void debug_work_deactivate(struct work_struct *work)
{
debug_object_deactivate(work, &work_debug_descr);
}
void __init_work(struct work_struct *work, int onstack)
{
if (onstack)
debug_object_init_on_stack(work, &work_debug_descr);
else
debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);
void destroy_work_on_stack(struct work_struct *work)
{
debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);
#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing; /* W: have wqs started freezing? */
/*
* The almighty global cpu workqueues. nr_running is the only field
* which is expected to be used frequently by other cpus via
* try_to_wake_up(). Put it in a separate cacheline.
*/
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
/*
* Global cpu workqueue and nr_running counter for unbound gcwq. The
* gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
* workers have WORKER_UNBOUND set.
*/
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
[0 ... NR_WORKER_POOLS - 1] = ATOMIC_INIT(0), /* always 0 */
};
static int worker_thread(void *__worker);
static int worker_pool_pri(struct worker_pool *pool)
{
return pool - pool->gcwq->pools;
}
static struct global_cwq *get_gcwq(unsigned int cpu)
{
if (cpu != WORK_CPU_UNBOUND)
return &per_cpu(global_cwq, cpu);
else
return &unbound_global_cwq;
}
static atomic_t *get_pool_nr_running(struct worker_pool *pool)
{
int cpu = pool->gcwq->cpu;
int idx = worker_pool_pri(pool);
if (cpu != WORK_CPU_UNBOUND)
return &per_cpu(pool_nr_running, cpu)[idx];
else
return &unbound_pool_nr_running[idx];
}
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
struct workqueue_struct *wq)
{
if (!(wq->flags & WQ_UNBOUND)) {
if (likely(cpu < nr_cpu_ids))
return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
} else if (likely(cpu == WORK_CPU_UNBOUND))
return wq->cpu_wq.single;
return NULL;
}
static unsigned int work_color_to_flags(int color)
{
return color << WORK_STRUCT_COLOR_SHIFT;
}
static int get_work_color(struct work_struct *work)
{
return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
((1 << WORK_STRUCT_COLOR_BITS) - 1);
}
static int work_next_color(int color)
{
return (color + 1) % WORK_NR_COLORS;
}
/*
* A work's data points to the cwq with WORK_STRUCT_CWQ set while the
* work is on queue. Once execution starts, WORK_STRUCT_CWQ is
* cleared and the work data contains the cpu number it was last on.
*
* set_work_{cwq|cpu}() and clear_work_data() can be used to set the
* cwq, cpu or clear work->data. These functions should only be
* called while the work is owned - ie. while the PENDING bit is set.
*
* get_work_[g]cwq() can be used to obtain the gcwq or cwq
* corresponding to a work. gcwq is available once the work has been
* queued anywhere after initialization. cwq is available only from
* queueing until execution starts.
*/
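/*
 * Concretely (derived from the helpers below): set_work_cwq() stores the
 * cwq pointer or'd with WORK_STRUCT_PENDING | WORK_STRUCT_CWQ, while
 * set_work_cpu() stores cpu << WORK_STRUCT_FLAG_BITS, so
 * get_work_cwq()/get_work_gcwq() can tell the two encodings apart by
 * testing the WORK_STRUCT_CWQ bit.
 */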
static inline void set_work_data(struct work_struct *work, unsigned long data,
unsigned long flags)
{
BUG_ON(!work_pending(work));
atomic_long_set(&work->data, data | flags | work_static(work));
}
static void set_work_cwq(struct work_struct *work,
struct cpu_workqueue_struct *cwq,
unsigned long extra_flags)
{
set_work_data(work, (unsigned long)cwq,
WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}
static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}
static void clear_work_data(struct work_struct *work)
{
set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}
static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_CWQ)
return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
else
return NULL;
}
static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
unsigned int cpu;
if (data & WORK_STRUCT_CWQ)
return ((struct cpu_workqueue_struct *)
(data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
cpu = data >> WORK_STRUCT_FLAG_BITS;
if (cpu == WORK_CPU_NONE)
return NULL;
BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
return get_gcwq(cpu);
}
/*
* Policy functions. These define the policies on how the global worker
* pools are managed. Unless noted otherwise, these functions assume that
* they're being called with gcwq->lock held.
*/
static bool __need_more_worker(struct worker_pool *pool)
{
return !atomic_read(get_pool_nr_running(pool));
}
/*
* Need to wake up a worker? Called from anything but currently
* running workers.
*
* Note that, because unbound workers never contribute to nr_running, this
* function will always return %true for unbound gcwq as long as the
* worklist isn't empty.
*/
static bool need_more_worker(struct worker_pool *pool)
{
return !list_empty(&pool->worklist) && __need_more_worker(pool);
}
/* Can I start working? Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
return pool->nr_idle;
}
/* Do I need to keep working? Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
atomic_t *nr_running = get_pool_nr_running(pool);
return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
}
/* Do we need a new worker? Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
return need_more_worker(pool) && !may_start_working(pool);
}
/* Do I need to be the manager? */
static bool need_to_manage_workers(struct worker_pool *pool)
{
return need_to_create_worker(pool) ||
(pool->flags & POOL_MANAGE_WORKERS);
}
/* Do we have too many workers and should some go away? */
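/*
 * Worked example (illustrative numbers): with MAX_IDLE_WORKERS_RATIO == 4
 * and 20 busy workers, up to 6 idle workers are tolerated; a 7th makes
 * (7 - 2) * 4 >= 20 true and the pool is flagged as having too many.
 */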
static bool too_many_workers(struct worker_pool *pool)
{
bool managing = pool->flags & POOL_MANAGING_WORKERS;
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle;
return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
/*
* Wake up functions.
*/
/* Return the first worker. Safe with preemption disabled */
static struct worker *first_worker(struct worker_pool *pool)
{
if (unlikely(list_empty(&pool->idle_list)))
return NULL;
return list_first_entry(&pool->idle_list, struct worker, entry);
}
/**
* wake_up_worker - wake up an idle worker
* @pool: worker pool to wake worker from
*
* Wake up the first idle worker of @pool.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void wake_up_worker(struct worker_pool *pool)
{
struct worker *worker = first_worker(pool);
if (likely(worker))
wake_up_process(worker->task);
}
/**
* wq_worker_waking_up - a worker is waking up
* @task: task waking up
* @cpu: CPU @task is waking up to
*
* This function is called during try_to_wake_up() when a worker is
* being awoken.
*
* CONTEXT:
* spin_lock_irq(rq->lock)
*/
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
struct worker *worker = kthread_data(task);
if (!(worker->flags & WORKER_NOT_RUNNING))
atomic_inc(get_pool_nr_running(worker->pool));
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
* @cpu: CPU in question, must be the current CPU number
*
* This function is called during schedule() when a busy worker is
 * going to sleep. A worker on the same cpu can be woken up by
 * returning a pointer to its task.
*
* CONTEXT:
* spin_lock_irq(rq->lock)
*
* RETURNS:
* Worker task on @cpu to wake up, %NULL if none.
*/
struct task_struct *wq_worker_sleeping(struct task_struct *task,
unsigned int cpu)
{
struct worker *worker = kthread_data(task), *to_wakeup = NULL;
struct worker_pool *pool = worker->pool;
atomic_t *nr_running = get_pool_nr_running(pool);
if (worker->flags & WORKER_NOT_RUNNING)
return NULL;
/* this can only happen on the local cpu */
BUG_ON(cpu != raw_smp_processor_id());
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
* Please read comment there.
*
* NOT_RUNNING is clear. This means that trustee is not in
* charge and we're running on the local cpu w/ rq lock held
 * and preemption disabled, which in turn means that nobody else
* could be manipulating idle_list, so dereferencing idle_list
* without gcwq lock is safe.
*/
if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
to_wakeup = first_worker(pool);
return to_wakeup ? to_wakeup->task : NULL;
}
/**
* worker_set_flags - set worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to set
* @wakeup: wakeup an idle worker if necessary
*
* Set @flags in @worker->flags and adjust nr_running accordingly. If
* nr_running becomes zero and @wakeup is %true, an idle worker is
* woken up.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock)
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
bool wakeup)
{
struct worker_pool *pool = worker->pool;
WARN_ON_ONCE(worker->task != current);
/*
* If transitioning into NOT_RUNNING, adjust nr_running and
* wake up an idle worker as necessary if requested by
* @wakeup.
*/
if ((flags & WORKER_NOT_RUNNING) &&
!(worker->flags & WORKER_NOT_RUNNING)) {
atomic_t *nr_running = get_pool_nr_running(pool);
if (wakeup) {
if (atomic_dec_and_test(nr_running) &&
!list_empty(&pool->worklist))
wake_up_worker(pool);
} else
atomic_dec(nr_running);
}
worker->flags |= flags;
}
/**
* worker_clr_flags - clear worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to clear
*
* Clear @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock)
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
struct worker_pool *pool = worker->pool;
unsigned int oflags = worker->flags;
WARN_ON_ONCE(worker->task != current);
worker->flags &= ~flags;
/*
* If transitioning out of NOT_RUNNING, increment nr_running. Note
* that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
* of multiple flags, not a single flag.
*/
if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
if (!(worker->flags & WORKER_NOT_RUNNING))
atomic_inc(get_pool_nr_running(pool));
}
/**
* busy_worker_head - return the busy hash head for a work
* @gcwq: gcwq of interest
* @work: work to be hashed
*
* Return hash head of @gcwq for @work.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*
* RETURNS:
* Pointer to the hash head.
*/
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
struct work_struct *work)
{
const int base_shift = ilog2(sizeof(struct work_struct));
unsigned long v = (unsigned long)work;
/* simple shift and fold hash, do we need something better? */
v >>= base_shift;
v += v >> BUSY_WORKER_HASH_ORDER;
v &= BUSY_WORKER_HASH_MASK;
return &gcwq->busy_hash[v];
}
/**
* __find_worker_executing_work - find worker which is executing a work
* @gcwq: gcwq of interest
* @bwh: hash head as returned by busy_worker_head()
* @work: work to find worker for
*
* Find a worker which is executing @work on @gcwq. @bwh should be
* the hash head obtained by calling busy_worker_head() with the same
* work.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*
* RETURNS:
* Pointer to worker which is executing @work if found, NULL
* otherwise.
*/
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
struct hlist_head *bwh,
struct work_struct *work)
{
struct worker *worker;
struct hlist_node *tmp;
hlist_for_each_entry(worker, tmp, bwh, hentry)
if (worker->current_work == work)
return worker;
return NULL;
}
/**
* find_worker_executing_work - find worker which is executing a work
* @gcwq: gcwq of interest
* @work: work to find worker for
*
* Find a worker which is executing @work on @gcwq. This function is
* identical to __find_worker_executing_work() except that this
* function calculates @bwh itself.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*
* RETURNS:
* Pointer to worker which is executing @work if found, NULL
* otherwise.
*/
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
struct work_struct *work)
{
return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
work);
}
/**
* insert_work - insert a work into gcwq
* @cwq: cwq @work belongs to
* @work: work to insert
* @head: insertion point
* @extra_flags: extra WORK_STRUCT_* flags to set
*
* Insert @work which belongs to @cwq into @gcwq after @head.
* @extra_flags is or'd to work_struct flags.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void insert_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work, struct list_head *head,
unsigned int extra_flags)
{
struct worker_pool *pool = cwq->pool;
/* we own @work, set data and link */
set_work_cwq(work, cwq, extra_flags);
/*
* Ensure that we get the right work->data if we see the
* result of list_add() below, see try_to_grab_pending().
*/
smp_wmb();
list_add_tail(&work->entry, head);
/*
* Ensure either worker_sched_deactivated() sees the above
* list_add_tail() or we see zero nr_running to avoid workers
* lying around lazily while there are works to be processed.
*/
smp_mb();
if (__need_more_worker(pool))
wake_up_worker(pool);
}
/*
* Test whether @work is being queued from another work executing on the
* same workqueue. This is rather expensive and should only be used from
* cold paths.
*/
static bool is_chained_work(struct workqueue_struct *wq)
{
unsigned long flags;
unsigned int cpu;
for_each_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker *worker;
struct hlist_node *pos;
int i;
spin_lock_irqsave(&gcwq->lock, flags);
for_each_busy_worker(worker, i, pos, gcwq) {
if (worker->task != current)
continue;
spin_unlock_irqrestore(&gcwq->lock, flags);
/*
* I'm @worker, no locking necessary. See if @work
* is headed to the same workqueue.
*/
return worker->current_cwq->wq == wq;
}
spin_unlock_irqrestore(&gcwq->lock, flags);
}
return false;
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
struct global_cwq *gcwq;
struct cpu_workqueue_struct *cwq;
struct list_head *worklist;
unsigned int work_flags;
unsigned long flags;
debug_work_activate(work);
/* if dying, only works from the same workqueue are allowed */
if (unlikely(wq->flags & WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
/* determine gcwq to use */
if (!(wq->flags & WQ_UNBOUND)) {
struct global_cwq *last_gcwq;
if (unlikely(cpu == WORK_CPU_UNBOUND))
cpu = raw_smp_processor_id();
/*
* It's multi cpu. If @wq is non-reentrant and @work
* was previously on a different cpu, it might still
* be running there, in which case the work needs to
* be queued on that cpu to guarantee non-reentrance.
*/
gcwq = get_gcwq(cpu);
if (wq->flags & WQ_NON_REENTRANT &&
(last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
struct worker *worker;
spin_lock_irqsave(&last_gcwq->lock, flags);
worker = find_worker_executing_work(last_gcwq, work);
if (worker && worker->current_cwq->wq == wq)
gcwq = last_gcwq;
else {
/* meh... not running there, queue here */
spin_unlock_irqrestore(&last_gcwq->lock, flags);
spin_lock_irqsave(&gcwq->lock, flags);
}
} else
spin_lock_irqsave(&gcwq->lock, flags);
} else {
gcwq = get_gcwq(WORK_CPU_UNBOUND);
spin_lock_irqsave(&gcwq->lock, flags);
}
/* gcwq determined, get cwq and queue */
cwq = get_cwq(gcwq->cpu, wq);
trace_workqueue_queue_work(cpu, cwq, work);
BUG_ON(!list_empty(&work->entry));
cwq->nr_in_flight[cwq->work_color]++;
work_flags = work_color_to_flags(cwq->work_color);
if (likely(cwq->nr_active < cwq->max_active)) {
trace_workqueue_activate_work(work);
cwq->nr_active++;
worklist = &cwq->pool->worklist;
} else {
work_flags |= WORK_STRUCT_DELAYED;
worklist = &cwq->delayed_works;
}
insert_work(cwq, work, worklist, work_flags);
spin_unlock_irqrestore(&gcwq->lock, flags);
}
/**
* queue_work - queue work on a workqueue
* @wq: workqueue to use
* @work: work to queue
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*
* We queue the work to the CPU on which it was submitted, but if the CPU dies
* it can be processed by another CPU.
*/
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
int ret;
ret = queue_work_on(get_cpu(), wq, work);
put_cpu();
return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
/**
* queue_work_on - queue work on specific cpu
* @cpu: CPU number to execute work on
* @wq: workqueue to use
* @work: work to queue
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*
* We queue the work to a specific CPU, the caller must ensure it
* can't go away.
*/
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
int ret = 0;
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_work(cpu, wq, work);
ret = 1;
}
return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
struct delayed_work *dwork = (struct delayed_work *)__data;
struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}
/**
* queue_delayed_work - queue work on a workqueue after delay
* @wq: workqueue to use
* @dwork: delayable work to queue
* @delay: number of jiffies to wait before queueing
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*/
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
if (delay == 0)
return queue_work(wq, &dwork->work);
return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
* queue_delayed_work_on - queue work on specific CPU after delay
* @cpu: CPU number to execute work on
* @wq: workqueue to use
* @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*/
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
int ret = 0;
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
unsigned int lcpu;
BUG_ON(timer_pending(timer));
BUG_ON(!list_empty(&work->entry));
timer_stats_timer_set_start_info(&dwork->timer);
/*
* This stores cwq for the moment, for the timer_fn.
* Note that the work's gcwq is preserved to allow
* reentrance detection for delayed works.
*/
if (!(wq->flags & WQ_UNBOUND)) {
struct global_cwq *gcwq = get_work_gcwq(work);
if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
lcpu = gcwq->cpu;
else
lcpu = raw_smp_processor_id();
} else
lcpu = WORK_CPU_UNBOUND;
set_work_cwq(work, get_cwq(lcpu, wq), 0);
timer->expires = jiffies + delay;
timer->data = (unsigned long)dwork;
timer->function = delayed_work_timer_fn;
if (unlikely(cpu >= 0))
add_timer_on(timer, cpu);
else
add_timer(timer);
ret = 1;
}
return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
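/*
 * Illustrative consumer-side sketch only (hypothetical my_* names, compiled
 * out with #if 0): arming a delayed work item so the handler runs roughly
 * 100ms later in process context.
 */
#if 0
static void my_delayed_fn(struct work_struct *work)
{
	pr_info("my_delayed_fn: delayed work executed\n");
}

static DECLARE_DELAYED_WORK(my_dwork, my_delayed_fn);

static void my_arm_delayed_work(void)
{
	/* the timer fires after ~100ms and then queues the work as usual */
	queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
}
#endif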
/**
* worker_enter_idle - enter idle state
* @worker: worker which is entering idle state
*
* @worker is entering idle state. Update stats and idle timer if
* necessary.
*
* LOCKING:
* spin_lock_irq(gcwq->lock).
*/
static void worker_enter_idle(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
struct global_cwq *gcwq = pool->gcwq;
BUG_ON(worker->flags & WORKER_IDLE);
BUG_ON(!list_empty(&worker->entry) &&
(worker->hentry.next || worker->hentry.pprev));
/* can't use worker_set_flags(), also called from start_worker() */
worker->flags |= WORKER_IDLE;
pool->nr_idle++;
worker->last_active = jiffies;
/* idle_list is LIFO */
list_add(&worker->entry, &pool->idle_list);
if (likely(!(worker->flags & WORKER_ROGUE))) {
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer,
jiffies + IDLE_WORKER_TIMEOUT);
} else
wake_up_all(&gcwq->trustee_wait);
/*
* Sanity check nr_running. Because trustee releases gcwq->lock
* between setting %WORKER_ROGUE and zapping nr_running, the
* warning may trigger spuriously. Check iff trustee is idle.
*/
WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
pool->nr_workers == pool->nr_idle &&
atomic_read(get_pool_nr_running(pool)));
}
/**
* worker_leave_idle - leave idle state
* @worker: worker which is leaving idle state
*
* @worker is leaving idle state. Update stats.
*
* LOCKING:
* spin_lock_irq(gcwq->lock).
*/
static void worker_leave_idle(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
BUG_ON(!(worker->flags & WORKER_IDLE));
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
list_del_init(&worker->entry);
}
/**
* worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
* @worker: self
*
* Works which are scheduled while the cpu is online must at least be
* scheduled to a worker which is bound to the cpu so that if they are
* flushed from cpu callbacks while cpu is going down, they are
* guaranteed to execute on the cpu.
*
* This function is to be used by rogue workers and rescuers to bind
* themselves to the target cpu and may race with cpu going down or
* coming online. kthread_bind() can't be used because it may put the
* worker to already dead cpu and set_cpus_allowed_ptr() can't be used
* verbatim as it's best effort and blocking and gcwq may be
* [dis]associated in the meantime.
*
 * This function tries set_cpus_allowed(), locks gcwq and verifies
* the binding against GCWQ_DISASSOCIATED which is set during
* CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
* idle state or fetches works without dropping lock, it can guarantee
* the scheduling requirement described in the first paragraph.
*
* CONTEXT:
* Might sleep. Called without any lock but returns with gcwq->lock
* held.
*
* RETURNS:
* %true if the associated gcwq is online (@worker is successfully
* bound), %false if offline.
*/
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
struct global_cwq *gcwq = worker->pool->gcwq;
struct task_struct *task = worker->task;
while (true) {
/*
* The following call may fail, succeed or succeed
* without actually migrating the task to the cpu if
* it races with cpu hotunplug operation. Verify
* against GCWQ_DISASSOCIATED.
*/
if (!(gcwq->flags & GCWQ_DISASSOCIATED))
set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
spin_lock_irq(&gcwq->lock);
if (gcwq->flags & GCWQ_DISASSOCIATED)
return false;
if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
get_cpu_mask(gcwq->cpu)))
return true;
spin_unlock_irq(&gcwq->lock);
/*
* We've raced with CPU hot[un]plug. Give it a breather
* and retry migration. cond_resched() is required here;
* otherwise, we might deadlock against cpu_stop trying to
* bring down the CPU on non-preemptive kernel.
*/
cpu_relax();
cond_resched();
}
}
/*
* Function for worker->rebind_work used to rebind rogue busy workers
* to the associated cpu which is coming back online. This is
* scheduled by cpu up but can race with other cpu hotplug operations
* and may be executed twice without intervening cpu down.
*/
static void worker_rebind_fn(struct work_struct *work)
{
struct worker *worker = container_of(work, struct worker, rebind_work);
struct global_cwq *gcwq = worker->pool->gcwq;
if (worker_maybe_bind_and_lock(worker))
worker_clr_flags(worker, WORKER_REBIND);
spin_unlock_irq(&gcwq->lock);
}
static struct worker *alloc_worker(void)
{
struct worker *worker;
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
INIT_WORK(&worker->rebind_work, worker_rebind_fn);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
return worker;
}
/**
* create_worker - create a new workqueue worker
* @pool: pool the new worker will belong to
* @bind: whether to set affinity to @cpu or not
*
* Create a new worker which is bound to @pool. The returned worker
* can be started by calling start_worker() or destroyed using
* destroy_worker().
*
* CONTEXT:
* Might sleep. Does GFP_KERNEL allocations.
*
* RETURNS:
* Pointer to the newly created worker.
*/
static struct worker *create_worker(struct worker_pool *pool, bool bind)
{
struct global_cwq *gcwq = pool->gcwq;
bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
const char *pri = worker_pool_pri(pool) ? "H" : "";
struct worker *worker = NULL;
int id = -1;
spin_lock_irq(&gcwq->lock);
while (ida_get_new(&pool->worker_ida, &id)) {
spin_unlock_irq(&gcwq->lock);
if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
goto fail;
spin_lock_irq(&gcwq->lock);
}
spin_unlock_irq(&gcwq->lock);
worker = alloc_worker();
if (!worker)
goto fail;
worker->pool = pool;
worker->id = id;
if (!on_unbound_cpu)
worker->task = kthread_create_on_node(worker_thread,
worker, cpu_to_node(gcwq->cpu),
"kworker/%u:%d%s", gcwq->cpu, id, pri);
else
worker->task = kthread_create(worker_thread, worker,
"kworker/u:%d%s", id, pri);
if (IS_ERR(worker->task))
goto fail;
if (worker_pool_pri(pool))
set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
/*
* A rogue worker will become a regular one if CPU comes
* online later on. Make sure every worker has
* PF_THREAD_BOUND set.
*/
if (bind && !on_unbound_cpu)
kthread_bind(worker->task, gcwq->cpu);
else {
worker->task->flags |= PF_THREAD_BOUND;
if (on_unbound_cpu)
worker->flags |= WORKER_UNBOUND;
}
return worker;
fail:
if (id >= 0) {
spin_lock_irq(&gcwq->lock);
ida_remove(&pool->worker_ida, id);
spin_unlock_irq(&gcwq->lock);
}
kfree(worker);
return NULL;
}
/**
* start_worker - start a newly created worker
* @worker: worker to start
*
* Make the gcwq aware of @worker and start it.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void start_worker(struct worker *worker)
{
worker->flags |= WORKER_STARTED;
worker->pool->nr_workers++;
worker_enter_idle(worker);
wake_up_process(worker->task);
}
/**
* destroy_worker - destroy a workqueue worker
* @worker: worker to be destroyed
*
* Destroy @worker and adjust @gcwq stats accordingly.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
*/
static void destroy_worker(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
struct global_cwq *gcwq = pool->gcwq;
int id = worker->id;
/* sanity check frenzy */
BUG_ON(worker->current_work);
BUG_ON(!list_empty(&worker->scheduled));
if (worker->flags & WORKER_STARTED)
pool->nr_workers--;
if (worker->flags & WORKER_IDLE)
pool->nr_idle--;
list_del_init(&worker->entry);
worker->flags |= WORKER_DIE;
spin_unlock_irq(&gcwq->lock);
kthread_stop(worker->task);
kfree(worker);
spin_lock_irq(&gcwq->lock);
ida_remove(&pool->worker_ida, id);
}
static void idle_worker_timeout(unsigned long __pool)
{
struct worker_pool *pool = (void *)__pool;
struct global_cwq *gcwq = pool->gcwq;
spin_lock_irq(&gcwq->lock);
if (too_many_workers(pool)) {
struct worker *worker;
unsigned long expires;
/* idle_list is kept in LIFO order, check the last one */
worker = list_entry(pool->idle_list.prev, struct worker, entry);
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
if (time_before(jiffies, expires))
mod_timer(&pool->idle_timer, expires);
else {
/* it's been idle for too long, wake up manager */
pool->flags |= POOL_MANAGE_WORKERS;
wake_up_worker(pool);
}
}
spin_unlock_irq(&gcwq->lock);
}
static bool send_mayday(struct work_struct *work)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct workqueue_struct *wq = cwq->wq;
unsigned int cpu;
if (!(wq->flags & WQ_RESCUER))
return false;
/* mayday mayday mayday */
cpu = cwq->pool->gcwq->cpu;
/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
if (cpu == WORK_CPU_UNBOUND)
cpu = 0;
if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
wake_up_process(wq->rescuer->task);
return true;
}
static void gcwq_mayday_timeout(unsigned long __pool)
{
struct worker_pool *pool = (void *)__pool;
struct global_cwq *gcwq = pool->gcwq;
struct work_struct *work;
spin_lock_irq(&gcwq->lock);
if (need_to_create_worker(pool)) {
/*
* We've been trying to create a new worker but
* haven't been successful. We might be hitting an
* allocation deadlock. Send distress signals to
* rescuers.
*/
list_for_each_entry(work, &pool->worklist, entry)
send_mayday(work);
}
spin_unlock_irq(&gcwq->lock);
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
/**
* maybe_create_worker - create a new worker if necessary
* @pool: pool to create a new worker for
*
* Create a new worker for @pool if necessary. @pool is guaranteed to
* have at least one idle worker on return from this function. If
* creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
* sent to all rescuers with works scheduled on @pool to resolve
* possible allocation deadlock.
*
* On return, need_to_create_worker() is guaranteed to be false and
* may_start_working() true.
*
* LOCKING:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*
* RETURNS:
* false if no action was taken and gcwq->lock stayed locked, true
* otherwise.
*/
static bool maybe_create_worker(struct worker_pool *pool)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
struct global_cwq *gcwq = pool->gcwq;
if (!need_to_create_worker(pool))
return false;
restart:
spin_unlock_irq(&gcwq->lock);
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
while (true) {
struct worker *worker;
worker = create_worker(pool, true);
if (worker) {
del_timer_sync(&pool->mayday_timer);
spin_lock_irq(&gcwq->lock);
start_worker(worker);
BUG_ON(need_to_create_worker(pool));
return true;
}
if (!need_to_create_worker(pool))
break;
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(CREATE_COOLDOWN);
if (!need_to_create_worker(pool))
break;
}
del_timer_sync(&pool->mayday_timer);
spin_lock_irq(&gcwq->lock);
if (need_to_create_worker(pool))
goto restart;
return true;
}
/**
* maybe_destroy_worker - destroy workers which have been idle for a while
* @pool: pool to destroy workers for
*
* Destroy @pool workers which have been idle for longer than
* IDLE_WORKER_TIMEOUT.
*
* LOCKING:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. Called only from manager.
*
* RETURNS:
* false if no action was taken and gcwq->lock stayed locked, true
* otherwise.
*/
static bool maybe_destroy_workers(struct worker_pool *pool)
{
bool ret = false;
while (too_many_workers(pool)) {
struct worker *worker;
unsigned long expires;
worker = list_entry(pool->idle_list.prev, struct worker, entry);
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
if (time_before(jiffies, expires)) {
mod_timer(&pool->idle_timer, expires);
break;
}
destroy_worker(worker);
ret = true;
}
return ret;
}
/**
* manage_workers - manage worker pool
* @worker: self
*
* Assume the manager role and manage gcwq worker pool @worker belongs
* to. At any given time, there can be only zero or one manager per
* gcwq. The exclusion is handled automatically by this function.
*
* The caller can safely start processing works on false return. On
* true return, it's guaranteed that need_to_create_worker() is false
* and may_start_working() is true.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations.
*
* RETURNS:
* false if no action was taken and gcwq->lock stayed locked, true if
* some action was taken.
*/
static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
struct global_cwq *gcwq = pool->gcwq;
bool ret = false;
if (pool->flags & POOL_MANAGING_WORKERS)
return ret;
pool->flags &= ~POOL_MANAGE_WORKERS;
pool->flags |= POOL_MANAGING_WORKERS;
/*
* Destroy and then create so that may_start_working() is true
* on return.
*/
ret |= maybe_destroy_workers(pool);
ret |= maybe_create_worker(pool);
pool->flags &= ~POOL_MANAGING_WORKERS;
/*
* The trustee might be waiting to take over the manager
* position, tell it we're done.
*/
if (unlikely(gcwq->trustee))
wake_up_all(&gcwq->trustee_wait);
return ret;
}
/**
* move_linked_works - move linked works to a list
* @work: start of series of works to be scheduled
* @head: target list to append @work to
* @nextp: out parameter for nested worklist walking
*
* Schedule linked works starting from @work to @head. Work series to
* be scheduled starts at @work and includes any consecutive work with
* WORK_STRUCT_LINKED set in its predecessor.
*
* If @nextp is not NULL, it's updated to point to the next work of
* the last scheduled work. This allows move_linked_works() to be
* nested inside outer list_for_each_entry_safe().
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void move_linked_works(struct work_struct *work, struct list_head *head,
struct work_struct **nextp)
{
struct work_struct *n;
/*
* Linked worklist will always end before the end of the list,
* use NULL for list head.
*/
list_for_each_entry_safe_from(work, n, NULL, entry) {
list_move_tail(&work->entry, head);
if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
break;
}
/*
* If we're already inside safe list traversal and have moved
* multiple works to the scheduled queue, the next position
* needs to be updated.
*/
if (nextp)
*nextp = n;
}
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
struct work_struct *work = list_first_entry(&cwq->delayed_works,
struct work_struct, entry);
trace_workqueue_activate_work(work);
move_linked_works(work, &cwq->pool->worklist, NULL);
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
cwq->nr_active++;
}
/**
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
* @color: color of work which left the queue
* @delayed: for a delayed work
*
* A work either has completed or is removed from pending queue,
* decrement nr_in_flight of its cwq and handle workqueue flushing.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
bool delayed)
{
/* ignore uncolored works */
if (color == WORK_NO_COLOR)
return;
cwq->nr_in_flight[color]--;
if (!delayed) {
cwq->nr_active--;
if (!list_empty(&cwq->delayed_works)) {
/* one down, submit a delayed one */
if (cwq->nr_active < cwq->max_active)
cwq_activate_first_delayed(cwq);
}
}
/* is flush in progress and are we at the flushing tip? */
if (likely(cwq->flush_color != color))
return;
/* are there still in-flight works? */
if (cwq->nr_in_flight[color])
return;
/* this cwq is done, clear flush_color */
cwq->flush_color = -1;
/*
* If this was the last cwq, wake up the first flusher. It
* will handle the rest.
*/
if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
complete(&cwq->wq->first_flusher->done);
}
/**
* process_one_work - process single work
* @worker: self
* @work: work to process
*
* Process @work. This function contains all the logic necessary
* process a single work including synchronization against and
* interaction with other workers on the same cpu, queueing and
* flushing. As long as the context requirement is met, any worker can
* call this function to process a work.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct worker_pool *pool = worker->pool;
struct global_cwq *gcwq = pool->gcwq;
struct hlist_head *bwh = busy_worker_head(gcwq, work);
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
work_func_t f = work->func;
int work_color;
struct worker *collision;
#ifdef CONFIG_LOCKDEP
/*
* It is permissible to free the struct work_struct from
* inside the function that is called from it; we need to take
* this into account for lockdep too. To avoid bogus "held
* lock freed" warnings as well as problems when looking into
* work->lockdep_map, make a copy and use that here.
*/
struct lockdep_map lockdep_map = work->lockdep_map;
#endif
/*
* A single work shouldn't be executed concurrently by
* multiple workers on a single cpu. Check whether anyone is
* already processing the work. If so, defer the work to the
* currently executing one.
*/
collision = __find_worker_executing_work(gcwq, bwh, work);
if (unlikely(collision)) {
move_linked_works(work, &collision->scheduled, NULL);
return;
}
/* claim and process */
debug_work_deactivate(work);
hlist_add_head(&worker->hentry, bwh);
worker->current_work = work;
worker->current_cwq = cwq;
work_color = get_work_color(work);
/* record the current cpu number in the work data and dequeue */
set_work_cpu(work, gcwq->cpu);
list_del_init(&work->entry);
/*
* CPU intensive works don't participate in concurrency
* management. They're the scheduler's responsibility.
*/
if (unlikely(cpu_intensive))
worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
/*
* Unbound gcwq isn't concurrency managed and work items should be
* executed ASAP. Wake up another worker if necessary.
*/
if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
wake_up_worker(pool);
spin_unlock_irq(&gcwq->lock);
work_clear_pending(work);
lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work);
f(work);
/*
* While we must be careful to not use "work" after this, the trace
* point will only record its address.
*/
trace_workqueue_execute_end(work);
lock_map_release(&lockdep_map);
lock_map_release(&cwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
"%s/0x%08x/%d\n",
current->comm, preempt_count(), task_pid_nr(current));
printk(KERN_ERR " last function: ");
print_symbol("%s\n", (unsigned long)f);
debug_show_held_locks(current);
BUG_ON(PANIC_CORRUPTION);
dump_stack();
}
spin_lock_irq(&gcwq->lock);
/* clear cpu intensive status */
if (unlikely(cpu_intensive))
worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
/* we're done with it, release */
hlist_del_init(&worker->hentry);
worker->current_work = NULL;
worker->current_cwq = NULL;
cwq_dec_nr_in_flight(cwq, work_color, false);
}
/**
* process_scheduled_works - process scheduled works
* @worker: self
*
* Process all scheduled works. Please note that the scheduled list
* may change while processing a work, so this function repeatedly
* fetches a work from the top and executes it.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times.
*/
static void process_scheduled_works(struct worker *worker)
{
while (!list_empty(&worker->scheduled)) {
struct work_struct *work = list_first_entry(&worker->scheduled,
struct work_struct, entry);
process_one_work(worker, work);
}
}
/**
* worker_thread - the worker thread function
* @__worker: self
*
* The gcwq worker thread function. There's a single dynamic pool of
* these per cpu. These workers process all works regardless of
* their specific target workqueue. The only exception is works which
* belong to workqueues with a rescuer which will be explained in
* rescuer_thread().
*/
static int worker_thread(void *__worker)
{
struct worker *worker = __worker;
struct worker_pool *pool = worker->pool;
struct global_cwq *gcwq = pool->gcwq;
/* tell the scheduler that this is a workqueue worker */
worker->task->flags |= PF_WQ_WORKER;
woke_up:
spin_lock_irq(&gcwq->lock);
/* DIE can be set only while we're idle, checking here is enough */
if (worker->flags & WORKER_DIE) {
spin_unlock_irq(&gcwq->lock);
worker->task->flags &= ~PF_WQ_WORKER;
return 0;
}
worker_leave_idle(worker);
recheck:
/* no more worker necessary? */
if (!need_more_worker(pool))
goto sleep;
/* do we need to manage? */
if (unlikely(!may_start_working(pool)) && manage_workers(worker))
goto recheck;
/*
* ->scheduled list can only be filled while a worker is
* preparing to process a work or actually processing it.
* Make sure nobody diddled with it while I was sleeping.
*/
BUG_ON(!list_empty(&worker->scheduled));
/*
* When control reaches this point, we're guaranteed to have
* at least one idle worker or that someone else has already
* assumed the manager role.
*/
worker_clr_flags(worker, WORKER_PREP);
do {
struct work_struct *work =
list_first_entry(&pool->worklist,
struct work_struct, entry);
if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
/* optimization path, not strictly necessary */
process_one_work(worker, work);
if (unlikely(!list_empty(&worker->scheduled)))
process_scheduled_works(worker);
} else {
move_linked_works(work, &worker->scheduled, NULL);
process_scheduled_works(worker);
}
} while (keep_working(pool));
worker_set_flags(worker, WORKER_PREP, false);
sleep:
if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
goto recheck;
/*
* gcwq->lock is held and there's no work to process and no
* need to manage, sleep. Workers are woken up only while
* holding gcwq->lock or from local cpu, so setting the
* current state before releasing gcwq->lock is enough to
* prevent losing any event.
*/
worker_enter_idle(worker);
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&gcwq->lock);
schedule();
goto woke_up;
}
/**
* rescuer_thread - the rescuer thread function
* @__wq: the associated workqueue
*
* Workqueue rescuer thread function. There's one rescuer for each
* workqueue which has WQ_RESCUER set.
*
* Regular work processing on a gcwq may block trying to create a new
* worker, which uses a GFP_KERNEL allocation and thus has a slight
* chance of developing into a deadlock if some works currently on the
* same queue need to be processed to satisfy that allocation. This is
* the problem rescuer solves.
*
* When such condition is possible, the gcwq summons rescuers of all
* workqueues which have works queued on the gcwq and let them process
* those works so that forward progress can be guaranteed.
*
* This should happen rarely.
*/
static int rescuer_thread(void *__wq)
{
struct workqueue_struct *wq = __wq;
struct worker *rescuer = wq->rescuer;
struct list_head *scheduled = &rescuer->scheduled;
bool is_unbound = wq->flags & WQ_UNBOUND;
unsigned int cpu;
set_user_nice(current, RESCUER_NICE_LEVEL);
repeat:
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
return 0;
/*
* See whether any cpu is asking for help. Unbounded
* workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
*/
for_each_mayday_cpu(cpu, wq->mayday_mask) {
unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
struct worker_pool *pool = cwq->pool;
struct global_cwq *gcwq = pool->gcwq;
struct work_struct *work, *n;
__set_current_state(TASK_RUNNING);
mayday_clear_cpu(cpu, wq->mayday_mask);
/* migrate to the target cpu if possible */
rescuer->pool = pool;
worker_maybe_bind_and_lock(rescuer);
/*
* Slurp in all works issued via this workqueue and
* process'em.
*/
BUG_ON(!list_empty(&rescuer->scheduled));
list_for_each_entry_safe(work, n, &pool->worklist, entry)
if (get_work_cwq(work) == cwq)
move_linked_works(work, scheduled, &n);
process_scheduled_works(rescuer);
/*
* Leave this gcwq. If keep_working() is %true, notify a
* regular worker; otherwise, we end up with 0 concurrency
* and stalling the execution.
*/
if (keep_working(pool))
wake_up_worker(pool);
spin_unlock_irq(&gcwq->lock);
}
schedule();
goto repeat;
}
struct wq_barrier {
struct work_struct work;
struct completion done;
};
static void wq_barrier_func(struct work_struct *work)
{
struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
complete(&barr->done);
}
/**
* insert_wq_barrier - insert a barrier work
* @cwq: cwq to insert barrier into
* @barr: wq_barrier to insert
* @target: target work to attach @barr to
* @worker: worker currently executing @target, NULL if @target is not executing
*
* @barr is linked to @target such that @barr is completed only after
* @target finishes execution. Please note that the ordering
* guarantee is observed only with respect to @target and on the local
* cpu.
*
* Currently, a queued barrier can't be canceled. This is because
* try_to_grab_pending() can't determine whether the work to be
* grabbed is at the head of the queue and thus can't clear LINKED
* flag of the previous work while there must be a valid next work
* after a work with LINKED flag set.
*
* Note that when @worker is non-NULL, @target may be modified
* underneath us, so we can't reliably determine cwq from @target.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
struct wq_barrier *barr,
struct work_struct *target, struct worker *worker)
{
struct list_head *head;
unsigned int linked = 0;
/*
* debugobject calls are safe here even with gcwq->lock locked
* as we know for sure that this will not trigger any of the
* checks and call back into the fixup functions where we
* might deadlock.
*/
INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
init_completion(&barr->done);
/*
* If @target is currently being executed, schedule the
* barrier to the worker; otherwise, put it after @target.
*/
if (worker)
head = worker->scheduled.next;
else {
unsigned long *bits = work_data_bits(target);
head = target->entry.next;
/* there can already be other linked works, inherit and set */
linked = *bits & WORK_STRUCT_LINKED;
__set_bit(WORK_STRUCT_LINKED_BIT, bits);
}
debug_work_activate(&barr->work);
insert_work(cwq, &barr->work, head,
work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
* flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
* @wq: workqueue being flushed
* @flush_color: new flush color, < 0 for no-op
* @work_color: new work color, < 0 for no-op
*
* Prepare cwqs for workqueue flushing.
*
* If @flush_color is non-negative, flush_color on all cwqs should be
* -1. If no cwq has in-flight commands at the specified color, all
* cwq->flush_color's stay at -1 and %false is returned. If any cwq
* has in flight commands, its cwq->flush_color is set to
* @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
* wakeup logic is armed and %true is returned.
*
* The caller should have initialized @wq->first_flusher prior to
* calling this function with non-negative @flush_color. If
* @flush_color is negative, no flush color update is done and %false
* is returned.
*
* If @work_color is non-negative, all cwqs should have the same
* work_color which is previous to @work_color and all will be
* advanced to @work_color.
*
* CONTEXT:
* mutex_lock(wq->flush_mutex).
*
* RETURNS:
* %true if @flush_color >= 0 and there's something to flush. %false
* otherwise.
*/
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
int flush_color, int work_color)
{
bool wait = false;
unsigned int cpu;
if (flush_color >= 0) {
BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
atomic_set(&wq->nr_cwqs_to_flush, 1);
}
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
struct global_cwq *gcwq = cwq->pool->gcwq;
spin_lock_irq(&gcwq->lock);
if (flush_color >= 0) {
BUG_ON(cwq->flush_color != -1);
if (cwq->nr_in_flight[flush_color]) {
cwq->flush_color = flush_color;
atomic_inc(&wq->nr_cwqs_to_flush);
wait = true;
}
}
if (work_color >= 0) {
BUG_ON(work_color != work_next_color(cwq->work_color));
cwq->work_color = work_color;
}
spin_unlock_irq(&gcwq->lock);
}
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
complete(&wq->first_flusher->done);
return wait;
}
/**
* flush_workqueue - ensure that any scheduled work has run to completion.
* @wq: workqueue to flush
*
* Forces execution of the workqueue and blocks until its completion.
* This is typically used in driver shutdown handlers.
*
* We sleep until all works which were queued on entry have been handled,
* but we are not livelocked by new incoming ones.
*/
void flush_workqueue(struct workqueue_struct *wq)
{
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
.flush_color = -1,
.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
};
int next_color;
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
mutex_lock(&wq->flush_mutex);
/*
* Start-to-wait phase
*/
next_color = work_next_color(wq->work_color);
if (next_color != wq->flush_color) {
/*
* Color space is not full. The current work_color
* becomes our flush_color and work_color is advanced
* by one.
*/
BUG_ON(!list_empty(&wq->flusher_overflow));
this_flusher.flush_color = wq->work_color;
wq->work_color = next_color;
if (!wq->first_flusher) {
/* no flush in progress, become the first flusher */
BUG_ON(wq->flush_color != this_flusher.flush_color);
wq->first_flusher = &this_flusher;
if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
wq->work_color)) {
/* nothing to flush, done */
wq->flush_color = next_color;
wq->first_flusher = NULL;
goto out_unlock;
}
} else {
/* wait in queue */
BUG_ON(wq->flush_color == this_flusher.flush_color);
list_add_tail(&this_flusher.list, &wq->flusher_queue);
flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
}
} else {
/*
* Oops, color space is full, wait on overflow queue.
* The next flush completion will assign us
* flush_color and transfer to flusher_queue.
*/
list_add_tail(&this_flusher.list, &wq->flusher_overflow);
}
mutex_unlock(&wq->flush_mutex);
wait_for_completion(&this_flusher.done);
/*
* Wake-up-and-cascade phase
*
* First flushers are responsible for cascading flushes and
* handling overflow. Non-first flushers can simply return.
*/
if (wq->first_flusher != &this_flusher)
return;
mutex_lock(&wq->flush_mutex);
/* we might have raced, check again with mutex held */
if (wq->first_flusher != &this_flusher)
goto out_unlock;
wq->first_flusher = NULL;
BUG_ON(!list_empty(&this_flusher.list));
BUG_ON(wq->flush_color != this_flusher.flush_color);
while (true) {
struct wq_flusher *next, *tmp;
/* complete all the flushers sharing the current flush color */
list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
if (next->flush_color != wq->flush_color)
break;
list_del_init(&next->list);
complete(&next->done);
}
BUG_ON(!list_empty(&wq->flusher_overflow) &&
wq->flush_color != work_next_color(wq->work_color));
/* this flush_color is finished, advance by one */
wq->flush_color = work_next_color(wq->flush_color);
/* one color has been freed, handle overflow queue */
if (!list_empty(&wq->flusher_overflow)) {
/*
* Assign the same color to all overflowed
* flushers, advance work_color and append to
* flusher_queue. This is the start-to-wait
* phase for these overflowed flushers.
*/
list_for_each_entry(tmp, &wq->flusher_overflow, list)
tmp->flush_color = wq->work_color;
wq->work_color = work_next_color(wq->work_color);
list_splice_tail_init(&wq->flusher_overflow,
&wq->flusher_queue);
flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
}
if (list_empty(&wq->flusher_queue)) {
BUG_ON(wq->flush_color != wq->work_color);
break;
}
/*
* Need to flush more colors. Make the next flusher
* the new first flusher and arm cwqs.
*/
BUG_ON(wq->flush_color == wq->work_color);
BUG_ON(wq->flush_color != next->flush_color);
list_del_init(&next->list);
wq->first_flusher = next;
if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
break;
/*
* Meh... this color is already done, clear first
* flusher and repeat cascading.
*/
wq->first_flusher = NULL;
}
out_unlock:
mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
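/*
 * Illustrative sketch, not part of the original source: a driver that owns
 * a private workqueue typically flushes it before freeing data its work
 * items touch.  All my_dev names below are hypothetical.
 *
 *	static void my_dev_quiesce(struct my_dev *dev)
 *	{
 *		disable_irq(dev->irq);
 *		flush_workqueue(dev->wq);
 *		kfree(dev->stats_buf);
 *	}
 *
 * After flush_workqueue() returns, every work item queued on dev->wq
 * before the call has finished executing, so stats_buf can be freed
 * without racing against them.
 */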
/**
* drain_workqueue - drain a workqueue
* @wq: workqueue to drain
*
* Wait until the workqueue becomes empty. While draining is in progress,
* only chain queueing is allowed. IOW, only currently pending or running
* work items on @wq can queue further work items on it. @wq is flushed
* repeatedly until it becomes empty. The number of flushes is determined
* by the depth of chaining and should be relatively short. Whine if it
* takes too long.
*/
void drain_workqueue(struct workqueue_struct *wq)
{
unsigned int flush_cnt = 0;
unsigned int cpu;
/*
* __queue_work() needs to test whether there are drainers; it is much
* hotter than drain_workqueue() and already looks at @wq->flags.
* Use WQ_DRAINING so that the queueing path doesn't have to check
* nr_drainers.
*/
spin_lock(&workqueue_lock);
if (!wq->nr_drainers++)
wq->flags |= WQ_DRAINING;
spin_unlock(&workqueue_lock);
reflush:
flush_workqueue(wq);
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
bool drained;
spin_lock_irq(&cwq->pool->gcwq->lock);
drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
spin_unlock_irq(&cwq->pool->gcwq->lock);
if (drained)
continue;
if (++flush_cnt == 10 ||
(flush_cnt % 100 == 0 && flush_cnt <= 1000))
pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
wq->name, flush_cnt);
goto reflush;
}
spin_lock(&workqueue_lock);
if (!--wq->nr_drainers)
wq->flags &= ~WQ_DRAINING;
spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
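/*
 * Illustrative sketch, not part of the original source: drain_workqueue()
 * is what destroy_workqueue() below relies on, but it can also be called
 * directly when self-requeueing work must be wound down before the queue
 * itself goes away.  The my_wq name is hypothetical.
 *
 *	drain_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 *
 * While the drain is in progress only chained queueing (work queued from
 * a work item already running on my_wq) is allowed; unrelated queueing
 * trips the WQ_DRAINING check on the queueing path.
 */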
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
bool wait_executing)
{
struct worker *worker = NULL;
struct global_cwq *gcwq;
struct cpu_workqueue_struct *cwq;
might_sleep();
gcwq = get_work_gcwq(work);
if (!gcwq)
return false;
spin_lock_irq(&gcwq->lock);
if (!list_empty(&work->entry)) {
/*
* See the comment near try_to_grab_pending()->smp_rmb().
* If it was re-queued to a different gcwq under us, we
* are not going to wait.
*/
smp_rmb();
cwq = get_work_cwq(work);
if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
goto already_gone;
} else if (wait_executing) {
worker = find_worker_executing_work(gcwq, work);
if (!worker)
goto already_gone;
cwq = worker->current_cwq;
} else
goto already_gone;
insert_wq_barrier(cwq, barr, work, worker);
spin_unlock_irq(&gcwq->lock);
/*
* If @max_active is 1 or rescuer is in use, flushing another work
* item on the same workqueue may lead to deadlock. Make sure the
* flusher is not running on the same workqueue by verifying write
* access.
*/
if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
lock_map_acquire(&cwq->wq->lockdep_map);
else
lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_release(&cwq->wq->lockdep_map);
return true;
already_gone:
spin_unlock_irq(&gcwq->lock);
return false;
}
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
*
* Wait until @work has finished execution. This function considers
* only the last queueing instance of @work. If @work has been
* enqueued across different CPUs on a non-reentrant workqueue or on
* multiple workqueues, @work might still be executing on return on
* some of the CPUs from earlier queueing.
*
* If @work was queued only on a non-reentrant, ordered or unbound
* workqueue, @work is guaranteed to be idle on return if it hasn't
* been requeued since flush started.
*
* RETURNS:
* %true if flush_work() waited for the work to finish execution,
* %false if it was already idle.
*/
bool flush_work(struct work_struct *work)
{
struct wq_barrier barr;
if (start_flush_work(work, &barr, true)) {
wait_for_completion(&barr.done);
destroy_work_on_stack(&barr.work);
return true;
} else
return false;
}
EXPORT_SYMBOL_GPL(flush_work);
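/*
 * Illustrative sketch, not part of the original source: waiting for the
 * last queueing instance of a single work item before consuming the result
 * it fills in.  ctx, calc_work and the result field are hypothetical.
 *
 *	schedule_work(&ctx->calc_work);
 *	...
 *	if (flush_work(&ctx->calc_work))
 *		pr_debug("waited for calc_work to finish\n");
 *
 * On return the fields written by calc_work are stable, provided nothing
 * has requeued it in the meantime.
 */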
static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
{
struct wq_barrier barr;
struct worker *worker;
spin_lock_irq(&gcwq->lock);
worker = find_worker_executing_work(gcwq, work);
if (unlikely(worker))
insert_wq_barrier(worker->current_cwq, &barr, work, worker);
spin_unlock_irq(&gcwq->lock);
if (unlikely(worker)) {
wait_for_completion(&barr.done);
destroy_work_on_stack(&barr.work);
return true;
} else
return false;
}
static bool wait_on_work(struct work_struct *work)
{
bool ret = false;
int cpu;
might_sleep();
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
for_each_gcwq_cpu(cpu)
ret |= wait_on_cpu_work(get_gcwq(cpu), work);
return ret;
}
/**
* flush_work_sync - wait until a work has finished execution
* @work: the work to flush
*
* Wait until @work has finished execution. On return, it's
* guaranteed that all queueing instances of @work which happened
* before this function is called are finished. In other words, if
* @work hasn't been requeued since this function was called, @work is
* guaranteed to be idle on return.
*
* RETURNS:
* %true if flush_work_sync() waited for the work to finish execution,
* %false if it was already idle.
*/
bool flush_work_sync(struct work_struct *work)
{
struct wq_barrier barr;
bool pending, waited;
/* we'll wait for executions separately, queue barr only if pending */
pending = start_flush_work(work, &barr, false);
/* wait for executions to finish */
waited = wait_on_work(work);
/* wait for the pending one */
if (pending) {
wait_for_completion(&barr.done);
destroy_work_on_stack(&barr.work);
}
return pending || waited;
}
EXPORT_SYMBOL_GPL(flush_work_sync);
/*
* Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
* so this work can't be re-armed in any way.
*/
static int try_to_grab_pending(struct work_struct *work)
{
struct global_cwq *gcwq;
int ret = -1;
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
*/
gcwq = get_work_gcwq(work);
if (!gcwq)
return ret;
spin_lock_irq(&gcwq->lock);
if (!list_empty(&work->entry)) {
/*
* This work is queued, but perhaps we locked the wrong gcwq.
* In that case we must see the new value after rmb(), see
* insert_work()->wmb().
*/
smp_rmb();
if (gcwq == get_work_gcwq(work)) {
debug_work_deactivate(work);
list_del_init(&work->entry);
cwq_dec_nr_in_flight(get_work_cwq(work),
get_work_color(work),
*work_data_bits(work) & WORK_STRUCT_DELAYED);
ret = 1;
}
}
spin_unlock_irq(&gcwq->lock);
return ret;
}
static bool __cancel_work_timer(struct work_struct *work,
struct timer_list* timer)
{
int ret;
do {
ret = (timer && likely(del_timer(timer)));
if (!ret)
ret = try_to_grab_pending(work);
wait_on_work(work);
} while (unlikely(ret < 0));
clear_work_data(work);
return ret;
}
/**
* cancel_work_sync - cancel a work and wait for it to finish
* @work: the work to cancel
*
* Cancel @work and wait for its execution to finish. This function
* can be used even if the work re-queues itself or migrates to
* another workqueue. On return from this function, @work is
* guaranteed to be not pending or executing on any CPU.
*
* cancel_work_sync(&delayed_work->work) must not be used for
* delayed_work's. Use cancel_delayed_work_sync() instead.
*
* The caller must ensure that the workqueue on which @work was last
* queued can't be destroyed before this function returns.
*
* RETURNS:
* %true if @work was pending, %false otherwise.
*/
bool cancel_work_sync(struct work_struct *work)
{
return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
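/*
 * Illustrative sketch, not part of the original source: the usual pattern
 * in a driver's remove path is to cancel outstanding work before the data
 * it operates on disappears.  my_dev and its members are hypothetical.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->reset_work);
 *		cancel_delayed_work_sync(&dev->poll_work);
 *		kfree(dev);
 *	}
 *
 * On return neither work item is pending or running anywhere, so freeing
 * the device structure cannot race with them.
 */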
/**
* flush_delayed_work - wait for a dwork to finish executing the last queueing
* @dwork: the delayed work to flush
*
* Delayed timer is cancelled and the pending work is queued for
* immediate execution. Like flush_work(), this function only
* considers the last queueing instance of @dwork.
*
* RETURNS:
* %true if flush_work() waited for the work to finish execution,
* %false if it was already idle.
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
if (del_timer_sync(&dwork->timer))
__queue_work(raw_smp_processor_id(),
get_work_cwq(&dwork->work)->wq, &dwork->work);
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
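/*
 * Illustrative sketch, not part of the original source: flush_delayed_work()
 * is handy when a periodically scheduled update must be forced to run right
 * now, e.g. from a sysfs store handler.  dev and refresh_work are
 * hypothetical.
 *
 *	schedule_delayed_work(&dev->refresh_work, HZ);
 *	...
 *	flush_delayed_work(&dev->refresh_work);
 *
 * The pending timer is cancelled, refresh_work is queued immediately and
 * the caller sleeps until that queueing instance has executed.
 */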
/**
* flush_delayed_work_sync - wait for a dwork to finish
* @dwork: the delayed work to flush
*
* Delayed timer is cancelled and the pending work is queued for
* execution immediately. Other than timer handling, its behavior
* is identical to flush_work_sync().
*
* RETURNS:
* %true if flush_work_sync() waited for the work to finish execution,
* %false if it was already idle.
*/
bool flush_delayed_work_sync(struct delayed_work *dwork)
{
if (del_timer_sync(&dwork->timer))
__queue_work(raw_smp_processor_id(),
get_work_cwq(&dwork->work)->wq, &dwork->work);
return flush_work_sync(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work_sync);
/**
* cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
* @dwork: the delayed work to cancel
*
* This is cancel_work_sync() for delayed works.
*
* RETURNS:
* %true if @dwork was pending, %false otherwise.
*/
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
/**
* schedule_work - put work task in global workqueue
* @work: job to be done
*
* Returns zero if @work was already on the kernel-global workqueue and
* non-zero otherwise.
*
* This puts a job in the kernel-global workqueue if it was not already
* queued and leaves it in the same position on the kernel-global
* workqueue otherwise.
*/
int schedule_work(struct work_struct *work)
{
return queue_work(system_wq, work);
}
EXPORT_SYMBOL(schedule_work);
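/*
 * Illustrative sketch, not part of the original source: the minimal
 * pattern for deferring work to the kernel-global workqueue.  The names
 * my_event_fn and my_event_work are hypothetical.
 *
 *	static void my_event_fn(struct work_struct *work)
 *	{
 *		pr_info("deferred event handled\n");
 *	}
 *	static DECLARE_WORK(my_event_work, my_event_fn);
 *
 * and then, from an interrupt handler or any other atomic context:
 *
 *	schedule_work(&my_event_work);
 */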
/*
* schedule_work_on - put work task on a specific cpu
* @cpu: cpu to put the work task on
* @work: job to be done
*
* This puts a job on a specific cpu.
*/
int schedule_work_on(int cpu, struct work_struct *work)
{
return queue_work_on(cpu, system_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
* schedule_delayed_work - put work task in global workqueue after delay
* @dwork: job to be done
* @delay: number of jiffies to wait or 0 for immediate execution
*
* After waiting for a given time this puts a job in the kernel-global
* workqueue.
*/
int schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
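/*
 * Illustrative sketch, not part of the original source: deferring work by
 * a wall-clock amount is normally expressed with msecs_to_jiffies().  The
 * my_poll_work and my_poll_fn names are hypothetical.
 *
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);
 *
 *	schedule_delayed_work(&my_poll_work, msecs_to_jiffies(500));
 *
 * A self-rearming poller simply calls schedule_delayed_work() again from
 * the end of my_poll_fn().
 */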
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
* @cpu: cpu to use
* @dwork: job to be done
* @delay: number of jiffies to wait
*
* After waiting for a given time this puts a job in the kernel-global
* workqueue on the specified CPU.
*/
int schedule_delayed_work_on(int cpu,
struct delayed_work *dwork, unsigned long delay)
{
return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
* schedule_on_each_cpu - execute a function synchronously on each online CPU
* @func: the function to call
*
* schedule_on_each_cpu() executes @func on each online CPU using the
* system workqueue and blocks until all CPUs have completed.
* schedule_on_each_cpu() is very slow.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int schedule_on_each_cpu(work_func_t func)
{
int cpu;
struct work_struct __percpu *works;
works = alloc_percpu(struct work_struct);
if (!works)
return -ENOMEM;
get_online_cpus();
for_each_online_cpu(cpu) {
struct work_struct *work = per_cpu_ptr(works, cpu);
INIT_WORK(work, func);
schedule_work_on(cpu, work);
}
for_each_online_cpu(cpu)
flush_work(per_cpu_ptr(works, cpu));
put_online_cpus();
free_percpu(works);
return 0;
}
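/*
 * Illustrative sketch, not part of the original source: running a small
 * maintenance function on every online CPU and waiting for all of them.
 * my_percpu_sync is hypothetical.
 *
 *	static void my_percpu_sync(struct work_struct *unused)
 *	{
 *		pr_debug("per-cpu sync ran\n");
 *	}
 *
 *	int err = schedule_on_each_cpu(my_percpu_sync);
 *
 * This sleeps and allocates, so it may only be used from process context,
 * and it is slow by design.
 */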
/**
* flush_scheduled_work - ensure that any scheduled work has run to completion.
*
* Forces execution of the kernel-global workqueue and blocks until its
* completion.
*
* Think twice before calling this function! It's very easy to get into
* trouble if you don't take great care. Either of the following situations
* will lead to deadlock:
*
* One of the work items currently on the workqueue needs to acquire
* a lock held by your code or its caller.
*
* Your code is running in the context of a work routine.
*
* They will be detected by lockdep when they occur, but the first might not
* occur very often. It depends on what work items are on the workqueue and
* what locks they need, which you have no control over.
*
* In most situations flushing the entire workqueue is overkill; you merely
* need to know that a particular work item isn't queued and isn't running.
* In such cases you should use cancel_delayed_work_sync() or
* cancel_work_sync() instead.
*/
void flush_scheduled_work(void)
{
flush_workqueue(system_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
* execute_in_process_context - reliably execute the routine with user context
* @fn: the function to execute
* @ew: guaranteed storage for the execute work structure (must
* be available when the work executes)
*
* Executes the function immediately if process context is available,
* otherwise schedules the function for delayed execution.
*
* Returns: 0 - function was executed
* 1 - function was scheduled for execution
*/
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
if (!in_interrupt()) {
fn(&ew->work);
return 0;
}
INIT_WORK(&ew->work, fn);
schedule_work(&ew->work);
return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
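/*
 * Illustrative sketch, not part of the original source: a release helper
 * that must run in process context but may be invoked from either process
 * or interrupt context.  my_obj and my_obj_release are hypothetical; note
 * that the execute_work storage must stay valid until the work runs, so it
 * is embedded in the object being released.
 *
 *	static void my_obj_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj,
 *						  ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_obj_release, &obj->ew);
 */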
int keventd_up(void)
{
return system_wq != NULL;
}
static int alloc_cwqs(struct workqueue_struct *wq)
{
/*
* cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
* Make sure that the alignment isn't lower than that of
* unsigned long long.
*/
const size_t size = sizeof(struct cpu_workqueue_struct);
const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
__alignof__(unsigned long long));
if (!(wq->flags & WQ_UNBOUND))
wq->cpu_wq.pcpu = __alloc_percpu(size, align);
else {
void *ptr;
/*
* Allocate enough room to align cwq and put an extra
* pointer at the end pointing back to the originally
* allocated pointer which will be used for free.
*/
ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
if (ptr) {
wq->cpu_wq.single = PTR_ALIGN(ptr, align);
*(void **)(wq->cpu_wq.single + 1) = ptr;
}
}
/* just in case, make sure it's actually aligned */
BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
return wq->cpu_wq.v ? 0 : -ENOMEM;
}
static void free_cwqs(struct workqueue_struct *wq)
{
if (!(wq->flags & WQ_UNBOUND))
free_percpu(wq->cpu_wq.pcpu);
else if (wq->cpu_wq.single) {
/* the pointer to free is stored right after the cwq */
kfree(*(void **)(wq->cpu_wq.single + 1));
}
}
static int wq_clamp_max_active(int max_active, unsigned int flags,
const char *name)
{
int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
if (max_active < 1 || max_active > lim)
printk(KERN_WARNING "workqueue: max_active %d requested for %s "
"is out of range, clamping between %d and %d\n",
max_active, name, 1, lim);
return clamp_val(max_active, 1, lim);
}
struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
unsigned int flags,
int max_active,
struct lock_class_key *key,
const char *lock_name, ...)
{
va_list args, args1;
struct workqueue_struct *wq;
unsigned int cpu;
size_t namelen;
/* determine namelen, allocate wq and format name */
va_start(args, lock_name);
va_copy(args1, args);
namelen = vsnprintf(NULL, 0, fmt, args) + 1;
wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
if (!wq)
goto err;
vsnprintf(wq->name, namelen, fmt, args1);
va_end(args);
va_end(args1);
/* see the comment above the definition of WQ_POWER_EFFICIENT */
if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
flags |= WQ_UNBOUND;
/*
* Workqueues which may be used during memory reclaim should
* have a rescuer to guarantee forward progress.
*/
if (flags & WQ_MEM_RECLAIM)
flags |= WQ_RESCUER;
max_active = max_active ?: WQ_DFL_ACTIVE;
max_active = wq_clamp_max_active(max_active, flags, wq->name);
/* init wq */
wq->flags = flags;
wq->saved_max_active = max_active;
mutex_init(&wq->flush_mutex);
atomic_set(&wq->nr_cwqs_to_flush, 0);
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
INIT_LIST_HEAD(&wq->list);
if (alloc_cwqs(wq) < 0)
goto err;
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
struct global_cwq *gcwq = get_gcwq(cpu);
int pool_idx = (bool)(flags & WQ_HIGHPRI);
BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
cwq->pool = &gcwq->pools[pool_idx];
cwq->wq = wq;
cwq->flush_color = -1;
cwq->max_active = max_active;
INIT_LIST_HEAD(&cwq->delayed_works);
}
if (flags & WQ_RESCUER) {
struct worker *rescuer;
if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
goto err;
wq->rescuer = rescuer = alloc_worker();
if (!rescuer)
goto err;
rescuer->task = kthread_create(rescuer_thread, wq, "%s",
wq->name);
if (IS_ERR(rescuer->task))
goto err;
rescuer->task->flags |= PF_THREAD_BOUND;
wake_up_process(rescuer->task);
}
/*
* workqueue_lock protects global freeze state and workqueues
* list. Grab it, set max_active accordingly and add the new
* workqueue to workqueues list.
*/
spin_lock(&workqueue_lock);
if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
for_each_cwq_cpu(cpu, wq)
get_cwq(cpu, wq)->max_active = 0;
list_add(&wq->list, &workqueues);
spin_unlock(&workqueue_lock);
return wq;
err:
if (wq) {
free_cwqs(wq);
free_mayday_mask(wq->mayday_mask);
kfree(wq->rescuer);
kfree(wq);
}
return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
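/*
 * Illustrative sketch, not part of the original source: callers normally
 * go through the alloc_workqueue() wrapper in workqueue.h rather than this
 * function directly.  A workqueue used on a memory-reclaim path might be
 * created like this (my_wq is hypothetical):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_driver", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 * WQ_MEM_RECLAIM gives the queue a rescuer thread, WQ_HIGHPRI selects the
 * highpri worker pool and a max_active of 0 picks the WQ_DFL_ACTIVE
 * default.
 */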
/**
* destroy_workqueue - safely terminate a workqueue
* @wq: target workqueue
*
* Safely destroy a workqueue. All work currently pending will be done first.
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
unsigned int cpu;
/* drain it before proceeding with destruction */
drain_workqueue(wq);
/*
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
*/
spin_lock(&workqueue_lock);
list_del(&wq->list);
spin_unlock(&workqueue_lock);
/* sanity check */
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
int i;
for (i = 0; i < WORK_NR_COLORS; i++)
BUG_ON(cwq->nr_in_flight[i]);
BUG_ON(cwq->nr_active);
BUG_ON(!list_empty(&cwq->delayed_works));
}
if (wq->flags & WQ_RESCUER) {
kthread_stop(wq->rescuer->task);
free_mayday_mask(wq->mayday_mask);
kfree(wq->rescuer);
}
free_cwqs(wq);
kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
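/*
 * Illustrative sketch, not part of the original source: destruction pairs
 * with alloc_workqueue(), typically in a module's exit path once nothing
 * can queue on the workqueue any more (my_wq is hypothetical):
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		destroy_workqueue(my_wq);
 *	}
 *
 * Everything already queued on my_wq is drained before its memory is
 * freed.
 */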
/**
* workqueue_set_max_active - adjust max_active of a workqueue
* @wq: target workqueue
* @max_active: new max_active value.
*
* Set max_active of @wq to @max_active.
*
* CONTEXT:
* Don't call from IRQ context.
*/
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
unsigned int cpu;
max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
spin_lock(&workqueue_lock);
wq->saved_max_active = max_active;
for_each_cwq_cpu(cpu, wq) {
struct global_cwq *gcwq = get_gcwq(cpu);
spin_lock_irq(&gcwq->lock);
if (!(wq->flags & WQ_FREEZABLE) ||
!(gcwq->flags & GCWQ_FREEZING))
get_cwq(gcwq->cpu, wq)->max_active = max_active;
spin_unlock_irq(&gcwq->lock);
}
spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
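/*
 * Illustrative sketch, not part of the original source: throttling an
 * existing workqueue at runtime, e.g. in response to a sysfs knob
 * (my_wq is hypothetical):
 *
 *	workqueue_set_max_active(my_wq, 1);
 *
 * Afterwards at most one work item of my_wq executes per CPU at a time;
 * the rest wait on the cwq's delayed_works list.
 */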
/**
* workqueue_congested - test whether a workqueue is congested
* @cpu: CPU in question
* @wq: target workqueue
*
* Test whether @wq's cpu workqueue for @cpu is congested. There is
* no synchronization around this function and the test result is
* unreliable and only useful as advisory hints or for debugging.
*
* RETURNS:
* %true if congested, %false otherwise.
*/
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
{
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
return !list_empty(&cwq->delayed_works);
}
EXPORT_SYMBOL_GPL(workqueue_congested);
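/*
 * Illustrative sketch, not part of the original source: a producer can use
 * the advisory congestion test to skip optional work (my_wq, cpu and
 * update_work are hypothetical):
 *
 *	if (!workqueue_congested(cpu, my_wq))
 *		queue_work_on(cpu, my_wq, &update_work);
 *
 * The result is only a hint; the queue state may change immediately after
 * the test.
 */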
/**
* work_cpu - return the last known associated cpu for @work
* @work: the work of interest
*
* RETURNS:
* CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
*/
unsigned int work_cpu(struct work_struct *work)
{
struct global_cwq *gcwq = get_work_gcwq(work);
return gcwq ? gcwq->cpu : WORK_CPU_NONE;
}
EXPORT_SYMBOL_GPL(work_cpu);
/**
* work_busy - test whether a work is currently pending or running
* @work: the work to be tested
*
* Test whether @work is currently pending or running. There is no
* synchronization around this function and the test result is
* unreliable and only useful as advisory hints or for debugging.
* Especially for reentrant wqs, the pending state might hide the
* running state.
*
* RETURNS:
* OR'd bitmask of WORK_BUSY_* bits.
*/
unsigned int work_busy(struct work_struct *work)
{
struct global_cwq *gcwq = get_work_gcwq(work);
unsigned long flags;
unsigned int ret = 0;
if (!gcwq)
return false;
spin_lock_irqsave(&gcwq->lock, flags);
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
if (find_worker_executing_work(gcwq, work))
ret |= WORK_BUSY_RUNNING;
spin_unlock_irqrestore(&gcwq->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
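/*
 * Illustrative sketch, not part of the original source: work_busy() is a
 * debugging aid, e.g. for a driver dumping its own state (my_work is
 * hypothetical):
 *
 *	unsigned int busy = work_busy(&my_work);
 *
 *	pr_info("my_work: %s%s\n",
 *		busy & WORK_BUSY_PENDING ? "pending " : "",
 *		busy & WORK_BUSY_RUNNING ? "running" : "");
 *
 * The snapshot is unsynchronized and may be stale by the time it is used.
 */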
/*
* CPU hotplug.
*
* There are two challenges in supporting CPU hotplug. Firstly, there
* are a lot of assumptions on strong associations among work, cwq and
* gcwq which make migrating pending and scheduled works very
* difficult to implement without impacting hot paths. Secondly,
* gcwqs serve mix of short, long and very long running works making
* blocked draining impractical.
*
* This is solved by allowing a gcwq to be detached from CPU, running
* it with unbound (rogue) workers and allowing it to be reattached
* later if the cpu comes back online. A separate thread is created
* to govern a gcwq in such state and is called the trustee of the
* gcwq.
*
* Trustee states and their descriptions.
*
* START Command state used on startup. On CPU_DOWN_PREPARE, a
* new trustee is started with this state.
*
* IN_CHARGE Once started, trustee will enter this state after
* assuming the manager role and making all existing
* workers rogue. DOWN_PREPARE waits for trustee to
* enter this state. After reaching IN_CHARGE, trustee
* tries to execute the pending worklist until it's empty
* and the state is set to BUTCHER, or the state is set
* to RELEASE.
*
* BUTCHER Command state which is set by the cpu callback after
* the cpu has gone down. Once this state is set, the trustee
* knows that there will be no new works on the worklist
* and once the worklist is empty it can proceed to
* killing idle workers.
*
* RELEASE Command state which is set by the cpu callback if the
* cpu down has been canceled or it has come online
* again. After recognizing this state, trustee stops
* trying to drain or butcher and clears ROGUE, rebinds
* all remaining workers back to the cpu and releases
* manager role.
*
* DONE Trustee will enter this state after BUTCHER or RELEASE
* is complete.
*
* trustee CPU draining
* took over down complete
* START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
* | | ^
* | CPU is back online v return workers |
* ----------------> RELEASE --------------
*/
/**
* trustee_wait_event_timeout - timed event wait for trustee
* @cond: condition to wait for
* @timeout: timeout in jiffies
*
* wait_event_timeout() for trustee to use. Handles locking and
* checks for RELEASE request.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. To be used by trustee.
*
* RETURNS:
* Positive indicating left time if @cond is satisfied, 0 if timed
* out, -1 if canceled.
*/
#define trustee_wait_event_timeout(cond, timeout) ({ \
long __ret = (timeout); \
while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
__ret) { \
spin_unlock_irq(&gcwq->lock); \
__wait_event_timeout(gcwq->trustee_wait, (cond) || \
(gcwq->trustee_state == TRUSTEE_RELEASE), \
__ret); \
spin_lock_irq(&gcwq->lock); \
} \
gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
})
/**
* trustee_wait_event - event wait for trustee
* @cond: condition to wait for
*
* wait_event() for trustee to use. Automatically handles locking and
* checks for CANCEL request.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. To be used by trustee.
*
* RETURNS:
* 0 if @cond is satisfied, -1 if canceled.
*/
#define trustee_wait_event(cond) ({ \
long __ret1; \
__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
__ret1 < 0 ? -1 : 0; \
})
static bool gcwq_is_managing_workers(struct global_cwq *gcwq)
{
struct worker_pool *pool;
for_each_worker_pool(pool, gcwq)
if (pool->flags & POOL_MANAGING_WORKERS)
return true;
return false;
}
static bool gcwq_has_idle_workers(struct global_cwq *gcwq)
{
struct worker_pool *pool;
for_each_worker_pool(pool, gcwq)
if (!list_empty(&pool->idle_list))
return true;
return false;
}
static int __cpuinit trustee_thread(void *__gcwq)
{
struct global_cwq *gcwq = __gcwq;
struct worker_pool *pool;
struct worker *worker;
struct work_struct *work;
struct hlist_node *pos;
long rc;
int i;
BUG_ON(gcwq->cpu != smp_processor_id());
spin_lock_irq(&gcwq->lock);
/*
* Claim the manager position and make all workers rogue.
* Trustee must be bound to the target cpu and can't be
* cancelled.
*/
BUG_ON(gcwq->cpu != smp_processor_id());
rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq));
BUG_ON(rc < 0);
for_each_worker_pool(pool, gcwq) {
pool->flags |= POOL_MANAGING_WORKERS;
list_for_each_entry(worker, &pool->idle_list, entry)
worker->flags |= WORKER_ROGUE;
}
for_each_busy_worker(worker, i, pos, gcwq)
worker->flags |= WORKER_ROGUE;
/*
* Call schedule() so that we cross rq->lock and thus can
* guarantee sched callbacks see the rogue flag. This is
* necessary as scheduler callbacks may be invoked from other
* cpus.
*/
spin_unlock_irq(&gcwq->lock);
schedule();
spin_lock_irq(&gcwq->lock);
/*
* Sched callbacks are disabled now. Zap nr_running. After
* this, nr_running stays zero and need_more_worker() and
* keep_working() are always true as long as the worklist is
* not empty.
*/
for_each_worker_pool(pool, gcwq)
atomic_set(get_pool_nr_running(pool), 0);
spin_unlock_irq(&gcwq->lock);
for_each_worker_pool(pool, gcwq)
del_timer_sync(&pool->idle_timer);
spin_lock_irq(&gcwq->lock);
/*
* We're now in charge. Notify and proceed to drain. We need
* to keep the gcwq running during the whole CPU down
* procedure as other cpu hotunplug callbacks may need to
* flush currently running tasks.
*/
gcwq->trustee_state = TRUSTEE_IN_CHARGE;
wake_up_all(&gcwq->trustee_wait);
/*
* The original cpu is in the process of dying and may go away
* anytime now. When that happens, we and all workers would
* be migrated to other cpus. Try draining any left work. We
* want to get it over with ASAP - spam rescuers, wake up as
* many idlers as necessary and create new ones till the
* worklist is empty. Note that if the gcwq is frozen, there
* may be frozen works in freezable cwqs. Don't declare
* completion while frozen.
*/
while (true) {
bool busy = false;
for_each_worker_pool(pool, gcwq)
busy |= pool->nr_workers != pool->nr_idle;
if (!busy && !(gcwq->flags & GCWQ_FREEZING) &&
gcwq->trustee_state != TRUSTEE_IN_CHARGE)
break;
for_each_worker_pool(pool, gcwq) {
int nr_works = 0;
list_for_each_entry(work, &pool->worklist, entry) {
send_mayday(work);
nr_works++;
}
list_for_each_entry(worker, &pool->idle_list, entry) {
if (!nr_works--)
break;
wake_up_process(worker->task);
}
if (need_to_create_worker(pool)) {
spin_unlock_irq(&gcwq->lock);
worker = create_worker(pool, false);
spin_lock_irq(&gcwq->lock);
if (worker) {
worker->flags |= WORKER_ROGUE;
start_worker(worker);
}
}
}
/* give a breather */
if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
break;
}
/*
* Either all works have been scheduled and cpu is down, or
* cpu down has already been canceled. Wait for and butcher
* all workers till we're canceled.
*/
do {
rc = trustee_wait_event(gcwq_has_idle_workers(gcwq));
i = 0;
for_each_worker_pool(pool, gcwq) {
while (!list_empty(&pool->idle_list)) {
worker = list_first_entry(&pool->idle_list,
struct worker, entry);
destroy_worker(worker);
}
i |= pool->nr_workers;
}
} while (i && rc >= 0);
/*
* At this point, either draining has completed and no worker
* is left, or cpu down has been canceled or the cpu is being
* brought back up. There shouldn't be any idle one left.
* Tell the remaining busy ones to rebind once they finish their
* currently scheduled works by scheduling the rebind_work.
*/
for_each_worker_pool(pool, gcwq)
WARN_ON(!list_empty(&pool->idle_list));
for_each_busy_worker(worker, i, pos, gcwq) {
struct work_struct *rebind_work = &worker->rebind_work;
/*
* Rebind_work may race with future cpu hotplug
* operations. Use a separate flag to mark that
* rebinding is scheduled.
*/
worker->flags |= WORKER_REBIND;
worker->flags &= ~WORKER_ROGUE;
/* queue rebind_work, wq doesn't matter, use the default one */
if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
work_data_bits(rebind_work)))
continue;
debug_work_activate(rebind_work);
insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
worker->scheduled.next,
work_color_to_flags(WORK_NO_COLOR));
}
/* relinquish manager role */
for_each_worker_pool(pool, gcwq)
pool->flags &= ~POOL_MANAGING_WORKERS;
/* notify completion */
gcwq->trustee = NULL;
gcwq->trustee_state = TRUSTEE_DONE;
wake_up_all(&gcwq->trustee_wait);
spin_unlock_irq(&gcwq->lock);
return 0;
}
/**
* wait_trustee_state - wait for trustee to enter the specified state
* @gcwq: gcwq the trustee of interest belongs to
* @state: target state to wait for
*
* Wait for the trustee to reach @state. DONE is already matched.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. To be used by cpu_callback.
*/
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
if (!(gcwq->trustee_state == state ||
gcwq->trustee_state == TRUSTEE_DONE)) {
spin_unlock_irq(&gcwq->lock);
__wait_event(gcwq->trustee_wait,
gcwq->trustee_state == state ||
gcwq->trustee_state == TRUSTEE_DONE);
spin_lock_irq(&gcwq->lock);
}
}
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct global_cwq *gcwq = get_gcwq(cpu);
struct task_struct *new_trustee = NULL;
struct worker *new_workers[NR_WORKER_POOLS] = { };
struct worker_pool *pool;
unsigned long flags;
int i;
action &= ~CPU_TASKS_FROZEN;
switch (action) {
case CPU_DOWN_PREPARE:
new_trustee = kthread_create(trustee_thread, gcwq,
"workqueue_trustee/%d\n", cpu);
if (IS_ERR(new_trustee))
return notifier_from_errno(PTR_ERR(new_trustee));
kthread_bind(new_trustee, cpu);
/* fall through */
case CPU_UP_PREPARE:
i = 0;
for_each_worker_pool(pool, gcwq) {
BUG_ON(pool->first_idle);
new_workers[i] = create_worker(pool, false);
if (!new_workers[i++])
goto err_destroy;
}
}
/* some are called w/ irq disabled, don't disturb irq status */
spin_lock_irqsave(&gcwq->lock, flags);
switch (action) {
case CPU_DOWN_PREPARE:
/* initialize trustee and tell it to acquire the gcwq */
BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
gcwq->trustee = new_trustee;
gcwq->trustee_state = TRUSTEE_START;
wake_up_process(gcwq->trustee);
wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
/* fall through */
case CPU_UP_PREPARE:
i = 0;
for_each_worker_pool(pool, gcwq) {
BUG_ON(pool->first_idle);
pool->first_idle = new_workers[i++];
}
break;
case CPU_DYING:
/*
* Before this, the trustee and all workers except for
* the ones which are still executing works from
* before the last CPU down must be on the cpu. After
* this, they'll all be diasporas.
*/
gcwq->flags |= GCWQ_DISASSOCIATED;
break;
case CPU_POST_DEAD:
gcwq->trustee_state = TRUSTEE_BUTCHER;
/* fall through */
case CPU_UP_CANCELED:
for_each_worker_pool(pool, gcwq) {
destroy_worker(pool->first_idle);
pool->first_idle = NULL;
}
break;
case CPU_DOWN_FAILED:
case CPU_ONLINE:
gcwq->flags &= ~GCWQ_DISASSOCIATED;
if (gcwq->trustee_state != TRUSTEE_DONE) {
gcwq->trustee_state = TRUSTEE_RELEASE;
wake_up_process(gcwq->trustee);
wait_trustee_state(gcwq, TRUSTEE_DONE);
}
/*
* Trustee is done and there might be no worker left.
* Put the first_idle in and request a real manager to
* take a look.
*/
for_each_worker_pool(pool, gcwq) {
spin_unlock_irq(&gcwq->lock);
kthread_bind(pool->first_idle->task, cpu);
spin_lock_irq(&gcwq->lock);
pool->flags |= POOL_MANAGE_WORKERS;
start_worker(pool->first_idle);
pool->first_idle = NULL;
}
break;
}
spin_unlock_irqrestore(&gcwq->lock, flags);
return notifier_from_errno(0);
err_destroy:
if (new_trustee)
kthread_stop(new_trustee);
spin_lock_irqsave(&gcwq->lock, flags);
for (i = 0; i < NR_WORKER_POOLS; i++)
if (new_workers[i])
destroy_worker(new_workers[i]);
spin_unlock_irqrestore(&gcwq->lock, flags);
return NOTIFY_BAD;
}
#ifdef CONFIG_SMP
struct work_for_cpu {
struct completion completion;
long (*fn)(void *);
void *arg;
long ret;
};
static int do_work_for_cpu(void *_wfc)
{
struct work_for_cpu *wfc = _wfc;
wfc->ret = wfc->fn(wfc->arg);
complete(&wfc->completion);
return 0;
}
/**
* work_on_cpu - run a function in user context on a particular cpu
* @cpu: the cpu to run on
* @fn: the function to run
* @arg: the function arg
*
* This will return the value @fn returns.
* It is up to the caller to ensure that the cpu doesn't go offline.
* The caller must not hold any locks which would prevent @fn from completing.
*/
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
struct task_struct *sub_thread;
struct work_for_cpu wfc = {
.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
.fn = fn,
.arg = arg,
};
sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
if (IS_ERR(sub_thread))
return PTR_ERR(sub_thread);
kthread_bind(sub_thread, cpu);
wake_up_process(sub_thread);
wait_for_completion(&wfc.completion);
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
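/*
 * Illustrative sketch, not part of the original source: running a function
 * that must execute on a specific CPU while the caller sleeps.  my_read_fn,
 * do_something_on_this_cpu and my_arg are hypothetical.
 *
 *	static long my_read_fn(void *arg)
 *	{
 *		return do_something_on_this_cpu(arg);
 *	}
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(2, my_read_fn, my_arg);
 *	put_online_cpus();
 *
 * The get/put_online_cpus() pair keeps CPU 2 from going offline while the
 * helper thread runs, which is the caller's responsibility per the comment
 * above.
 */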
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
/**
* freeze_workqueues_begin - begin freezing workqueues
*
* Start freezing workqueues. After this function returns, all freezable
* workqueues will queue new works to their frozen_works list instead of
* gcwq->worklist.
*
* CONTEXT:
* Grabs and releases workqueue_lock and gcwq->lock's.
*/
void freeze_workqueues_begin(void)
{
unsigned int cpu;
spin_lock(&workqueue_lock);
BUG_ON(workqueue_freezing);
workqueue_freezing = true;
for_each_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
struct workqueue_struct *wq;
spin_lock_irq(&gcwq->lock);
BUG_ON(gcwq->flags & GCWQ_FREEZING);
gcwq->flags |= GCWQ_FREEZING;
list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
if (cwq && wq->flags & WQ_FREEZABLE)
cwq->max_active = 0;
}
spin_unlock_irq(&gcwq->lock);
}
spin_unlock(&workqueue_lock);
}
/**
* freeze_workqueues_busy - are freezable workqueues still busy?
*
* Check whether freezing is complete. This function must be called
* between freeze_workqueues_begin() and thaw_workqueues().
*
* CONTEXT:
* Grabs and releases workqueue_lock.
*
* RETURNS:
* %true if some freezable workqueues are still busy. %false if freezing
* is complete.
*/
bool freeze_workqueues_busy(void)
{
unsigned int cpu;
bool busy = false;
spin_lock(&workqueue_lock);
BUG_ON(!workqueue_freezing);
for_each_gcwq_cpu(cpu) {
struct workqueue_struct *wq;
/*
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
if (!cwq || !(wq->flags & WQ_FREEZABLE))
continue;
BUG_ON(cwq->nr_active < 0);
if (cwq->nr_active) {
busy = true;
goto out_unlock;
}
}
}
out_unlock:
spin_unlock(&workqueue_lock);
return busy;
}
/**
* thaw_workqueues - thaw workqueues
*
* Thaw workqueues. Normal queueing is restored and all collected
* frozen works are transferred to their respective gcwq worklists.
*
* CONTEXT:
* Grabs and releases workqueue_lock and gcwq->lock's.
*/
void thaw_workqueues(void)
{
unsigned int cpu;
spin_lock(&workqueue_lock);
if (!workqueue_freezing)
goto out_unlock;
for_each_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker_pool *pool;
struct workqueue_struct *wq;
spin_lock_irq(&gcwq->lock);
BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
gcwq->flags &= ~GCWQ_FREEZING;
list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
if (!cwq || !(wq->flags & WQ_FREEZABLE))
continue;
/* restore max_active and repopulate worklist */
cwq->max_active = wq->saved_max_active;
while (!list_empty(&cwq->delayed_works) &&
cwq->nr_active < cwq->max_active)
cwq_activate_first_delayed(cwq);
}
for_each_worker_pool(pool, gcwq)
wake_up_worker(pool);
spin_unlock_irq(&gcwq->lock);
}
workqueue_freezing = false;
out_unlock:
spin_unlock(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
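/*
 * Illustrative sketch (editor's addition): the expected calling sequence for
 * the freezer hooks above, roughly as a suspend/hibernation core would use
 * them.  Timeout and error handling are omitted; this is a sketch only.
 */
#if 0
static void example_freeze_cycle(void)
{
	freeze_workqueues_begin();

	/* poll until every freezable workqueue has drained its active works */
	while (freeze_workqueues_busy())
		msleep(10);

	/* ... take the snapshot / enter the low power state ... */

	thaw_workqueues();
}
#endif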
static int __init init_workqueues(void)
{
unsigned int cpu;
int i;
cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
/* initialize gcwqs */
for_each_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker_pool *pool;
spin_lock_init(&gcwq->lock);
gcwq->cpu = cpu;
gcwq->flags |= GCWQ_DISASSOCIATED;
for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
for_each_worker_pool(pool, gcwq) {
pool->gcwq = gcwq;
INIT_LIST_HEAD(&pool->worklist);
INIT_LIST_HEAD(&pool->idle_list);
init_timer_deferrable(&pool->idle_timer);
pool->idle_timer.function = idle_worker_timeout;
pool->idle_timer.data = (unsigned long)pool;
setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
(unsigned long)pool);
ida_init(&pool->worker_ida);
}
gcwq->trustee_state = TRUSTEE_DONE;
init_waitqueue_head(&gcwq->trustee_wait);
}
/* create the initial worker */
for_each_online_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker_pool *pool;
if (cpu != WORK_CPU_UNBOUND)
gcwq->flags &= ~GCWQ_DISASSOCIATED;
for_each_worker_pool(pool, gcwq) {
struct worker *worker;
worker = create_worker(pool, true);
BUG_ON(!worker);
spin_lock_irq(&gcwq->lock);
start_worker(worker);
spin_unlock_irq(&gcwq->lock);
}
}
system_wq = alloc_workqueue("events", 0, 0);
system_long_wq = alloc_workqueue("events_long", 0, 0);
system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
WQ_UNBOUND_MAX_ACTIVE);
system_freezable_wq = alloc_workqueue("events_freezable",
WQ_FREEZABLE, 0);
system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
system_power_efficient_wq = alloc_workqueue("events_power_efficient",
WQ_POWER_EFFICIENT, 0);
system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
WQ_FREEZABLE | WQ_POWER_EFFICIENT,
0);
BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
!system_unbound_wq || !system_freezable_wq ||
!system_nrt_freezable_wq ||
!system_power_efficient_wq ||
!system_freezable_power_efficient_wq);
return 0;
}
early_initcall(init_workqueues);
| SubhrajyotiSen/HelioxKernelOnyx | kernel/workqueue.c | C | gpl-2.0 | 109,169 |
#include "../comedidev.h"
#include "comedi_pci.h"
#include "8255.h"
#define PCI_VENDOR_ID_CB 0x1307 /* PCI vendor number of ComputerBoards */
#define EEPROM_SIZE 128 /* number of entries in eeprom */
#define MAX_AO_CHANNELS 8 /* maximum number of ao channels for supported boards */
/* PCI-DDA base addresses */
#define DIGITALIO_BADRINDEX 2
/* DIGITAL I/O is pci_dev->resource[2] */
#define DIGITALIO_SIZE 8
/* DIGITAL I/O uses 8 I/O port addresses */
#define DAC_BADRINDEX 3
/* DAC is pci_dev->resource[3] */
/* Digital I/O registers */
#define PORT1A 0 /* PORT 1A DATA */
#define PORT1B 1 /* PORT 1B DATA */
#define PORT1C 2 /* PORT 1C DATA */
#define CONTROL1 3 /* CONTROL REGISTER 1 */
#define PORT2A 4 /* PORT 2A DATA */
#define PORT2B 5 /* PORT 2B DATA */
#define PORT2C 6 /* PORT 2C DATA */
#define CONTROL2 7 /* CONTROL REGISTER 2 */
/* DAC registers */
#define DACONTROL 0 /* D/A CONTROL REGISTER */
#define SU 0000001 /* Simultaneous update enabled */
#define NOSU 0000000 /* Simultaneous update disabled */
#define ENABLEDAC 0000002 /* Enable specified DAC */
#define DISABLEDAC 0000000 /* Disable specified DAC */
#define RANGE2V5 0000000 /* 2.5V */
#define RANGE5V 0000200 /* 5V */
#define RANGE10V 0000300 /* 10V */
#define UNIP 0000400 /* Unipolar outputs */
#define BIP 0000000 /* Bipolar outputs */
#define DACALIBRATION1 4 /* D/A CALIBRATION REGISTER 1 */
/* write bits */
#define SERIAL_IN_BIT 0x1 /* serial data input for eeprom, caldacs, reference dac */
#define CAL_CHANNEL_MASK (0x7 << 1)
#define CAL_CHANNEL_BITS(channel) (((channel) << 1) & CAL_CHANNEL_MASK)
/* read bits */
#define CAL_COUNTER_MASK 0x1f
#define CAL_COUNTER_OVERFLOW_BIT 0x20 /* calibration counter overflow status bit */
#define AO_BELOW_REF_BIT 0x40 /* analog output is less than reference dac voltage */
#define SERIAL_OUT_BIT 0x80 /* serial data out, for reading from eeprom */
#define DACALIBRATION2 6 /* D/A CALIBRATION REGISTER 2 */
#define SELECT_EEPROM_BIT 0x1 /* send serial data in to eeprom */
#define DESELECT_REF_DAC_BIT 0x2 /* don't send serial data to MAX542 reference dac */
#define DESELECT_CALDAC_BIT(n) (0x4 << (n)) /* don't send serial data to caldac n */
#define DUMMY_BIT 0x40 /* manual says to set this bit with no explanation */
#define DADATA 8 /* FIRST D/A DATA REGISTER (0) */
static const struct comedi_lrange cb_pcidda_ranges = {
6,
{
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5),
}
};
struct cb_pcidda_board {
const char *name;
char status; /* Driver status: */
/*
* 0 - tested
* 1 - manual read, not tested
* 2 - manual not read
*/
unsigned short device_id;
int ao_chans;
int ao_bits;
const struct comedi_lrange *ranges;
};
static const struct cb_pcidda_board cb_pcidda_boards[] = {
{
.name = "pci-dda02/12",
.status = 1,
.device_id = 0x20,
.ao_chans = 2,
.ao_bits = 12,
.ranges = &cb_pcidda_ranges,
},
{
.name = "pci-dda04/12",
.status = 1,
.device_id = 0x21,
.ao_chans = 4,
.ao_bits = 12,
.ranges = &cb_pcidda_ranges,
},
{
.name = "pci-dda08/12",
.status = 0,
.device_id = 0x22,
.ao_chans = 8,
.ao_bits = 12,
.ranges = &cb_pcidda_ranges,
},
{
.name = "pci-dda02/16",
.status = 2,
.device_id = 0x23,
.ao_chans = 2,
.ao_bits = 16,
.ranges = &cb_pcidda_ranges,
},
{
.name = "pci-dda04/16",
.status = 2,
.device_id = 0x24,
.ao_chans = 4,
.ao_bits = 16,
.ranges = &cb_pcidda_ranges,
},
{
.name = "pci-dda08/16",
.status = 0,
.device_id = 0x25,
.ao_chans = 8,
.ao_bits = 16,
.ranges = &cb_pcidda_ranges,
},
};
static DEFINE_PCI_DEVICE_TABLE(cb_pcidda_pci_table) = {
{
PCI_VENDOR_ID_CB, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_CB, 0x0021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_CB, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_CB, 0x0023, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_CB, 0x0024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_CB, 0x0025, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
0}
};
MODULE_DEVICE_TABLE(pci, cb_pcidda_pci_table);
#define thisboard ((const struct cb_pcidda_board *)dev->board_ptr)
struct cb_pcidda_private {
int data;
/* would be useful for a PCI device */
struct pci_dev *pci_dev;
unsigned long digitalio;
unsigned long dac;
/* unsigned long control_status; */
/* unsigned long adc_fifo; */
unsigned int dac_cal1_bits; /* bits last written to da calibration register 1 */
unsigned int ao_range[MAX_AO_CHANNELS]; /* current range settings for output channels */
u16 eeprom_data[EEPROM_SIZE]; /* software copy of board's eeprom */
};
#define devpriv ((struct cb_pcidda_private *)dev->private)
static int cb_pcidda_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int cb_pcidda_detach(struct comedi_device *dev);
/* static int cb_pcidda_ai_rinsn(struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data); */
static int cb_pcidda_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
/* static int cb_pcidda_ai_cmd(struct comedi_device *dev, struct *comedi_subdevice *s);*/
/* static int cb_pcidda_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); */
/* static int cb_pcidda_ns_to_timer(unsigned int *ns,int *round); */
static unsigned int cb_pcidda_serial_in(struct comedi_device *dev);
static void cb_pcidda_serial_out(struct comedi_device *dev, unsigned int value,
unsigned int num_bits);
static unsigned int cb_pcidda_read_eeprom(struct comedi_device *dev,
unsigned int address);
static void cb_pcidda_calibrate(struct comedi_device *dev, unsigned int channel,
unsigned int range);
static struct comedi_driver driver_cb_pcidda = {
.driver_name = "cb_pcidda",
.module = THIS_MODULE,
.attach = cb_pcidda_attach,
.detach = cb_pcidda_detach,
};
static int cb_pcidda_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
struct pci_dev *pcidev;
int index;
printk("comedi%d: cb_pcidda: ", dev->minor);
if (alloc_private(dev, sizeof(struct cb_pcidda_private)) < 0)
return -ENOMEM;
printk("\n");
for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
pcidev != NULL;
pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
if (pcidev->vendor == PCI_VENDOR_ID_CB) {
if (it->options[0] || it->options[1]) {
if (pcidev->bus->number != it->options[0] ||
PCI_SLOT(pcidev->devfn) != it->options[1]) {
continue;
}
}
for (index = 0; index < ARRAY_SIZE(cb_pcidda_boards); index++) {
if (cb_pcidda_boards[index].device_id ==
pcidev->device) {
goto found;
}
}
}
}
if (!pcidev) {
printk
("Not a ComputerBoards/MeasurementComputing card on requested position\n");
return -EIO;
}
found:
devpriv->pci_dev = pcidev;
dev->board_ptr = cb_pcidda_boards + index;
/* "thisboard" macro can be used from here. */
printk("Found %s at requested position\n", thisboard->name);
/*
* Enable PCI device and request regions.
*/
if (comedi_pci_enable(pcidev, thisboard->name)) {
printk
("cb_pcidda: failed to enable PCI device and request regions\n");
return -EIO;
}
devpriv->digitalio =
pci_resource_start(devpriv->pci_dev, DIGITALIO_BADRINDEX);
devpriv->dac = pci_resource_start(devpriv->pci_dev, DAC_BADRINDEX);
if (thisboard->status == 2)
printk
("WARNING: DRIVER FOR THIS BOARD NOT CHECKED WITH MANUAL. "
"WORKS ASSUMING FULL COMPATIBILITY WITH PCI-DDA08/12. "
"PLEASE REPORT USAGE TO <ivanmr@altavista.com>.\n");
dev->board_name = thisboard->name;
if (alloc_subdevices(dev, 3) < 0)
return -ENOMEM;
s = dev->subdevices + 0;
/* analog output subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = thisboard->ao_chans;
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = thisboard->ranges;
s->insn_write = cb_pcidda_ao_winsn;
/* s->subdev_flags |= SDF_CMD_READ; */
/* s->do_cmd = cb_pcidda_ai_cmd; */
/* s->do_cmdtest = cb_pcidda_ai_cmdtest; */
/* two 8255 digital io subdevices */
s = dev->subdevices + 1;
subdev_8255_init(dev, s, NULL, devpriv->digitalio);
s = dev->subdevices + 2;
subdev_8255_init(dev, s, NULL, devpriv->digitalio + PORT2A);
printk(" eeprom:");
for (index = 0; index < EEPROM_SIZE; index++) {
devpriv->eeprom_data[index] = cb_pcidda_read_eeprom(dev, index);
printk(" %i:0x%x ", index, devpriv->eeprom_data[index]);
}
printk("\n");
/* set calibrations dacs */
for (index = 0; index < thisboard->ao_chans; index++)
cb_pcidda_calibrate(dev, index, devpriv->ao_range[index]);
return 1;
}
static int cb_pcidda_detach(struct comedi_device *dev)
{
if (devpriv) {
if (devpriv->pci_dev) {
if (devpriv->dac)
comedi_pci_disable(devpriv->pci_dev);
pci_dev_put(devpriv->pci_dev);
}
}
/* cleanup 8255 */
if (dev->subdevices) {
subdev_8255_cleanup(dev, dev->subdevices + 1);
subdev_8255_cleanup(dev, dev->subdevices + 2);
}
printk("comedi%d: cb_pcidda: remove\n", dev->minor);
return 0;
}
#if 0
static int cb_pcidda_ai_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
printk("cb_pcidda_ai_cmd\n");
printk("subdev: %d\n", cmd->subdev);
printk("flags: %d\n", cmd->flags);
printk("start_src: %d\n", cmd->start_src);
printk("start_arg: %d\n", cmd->start_arg);
printk("scan_begin_src: %d\n", cmd->scan_begin_src);
printk("convert_src: %d\n", cmd->convert_src);
printk("convert_arg: %d\n", cmd->convert_arg);
printk("scan_end_src: %d\n", cmd->scan_end_src);
printk("scan_end_arg: %d\n", cmd->scan_end_arg);
printk("stop_src: %d\n", cmd->stop_src);
printk("stop_arg: %d\n", cmd->stop_arg);
printk("chanlist_len: %d\n", cmd->chanlist_len);
}
#endif
#if 0
static int cb_pcidda_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
int tmp;
/* cmdtest tests a particular command to see if it is valid.
* Using the cmdtest ioctl, a user can create a valid cmd
	 * and then have it executed by the cmd ioctl.
*
* cmdtest returns 1,2,3,4 or 0, depending on which tests
* the command passes. */
/* step 1: make sure trigger sources are trivially valid */
tmp = cmd->start_src;
cmd->start_src &= TRIG_NOW;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
tmp = cmd->scan_begin_src;
cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT;
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_TIMER | TRIG_EXT;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/* step 2: make sure trigger sources are unique and mutually compatible */
/* note that mutual compatibility is not an issue here */
if (cmd->scan_begin_src != TRIG_TIMER
&& cmd->scan_begin_src != TRIG_EXT)
err++;
if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT)
err++;
if (cmd->stop_src != TRIG_TIMER && cmd->stop_src != TRIG_EXT)
err++;
if (err)
return 2;
/* step 3: make sure arguments are trivially compatible */
if (cmd->start_arg != 0) {
cmd->start_arg = 0;
err++;
}
#define MAX_SPEED 10000 /* in nanoseconds */
#define MIN_SPEED 1000000000 /* in nanoseconds */
if (cmd->scan_begin_src == TRIG_TIMER) {
if (cmd->scan_begin_arg < MAX_SPEED) {
cmd->scan_begin_arg = MAX_SPEED;
err++;
}
if (cmd->scan_begin_arg > MIN_SPEED) {
cmd->scan_begin_arg = MIN_SPEED;
err++;
}
} else {
/* external trigger */
/* should be level/edge, hi/lo specification here */
/* should specify multiple external triggers */
if (cmd->scan_begin_arg > 9) {
cmd->scan_begin_arg = 9;
err++;
}
}
if (cmd->convert_src == TRIG_TIMER) {
if (cmd->convert_arg < MAX_SPEED) {
cmd->convert_arg = MAX_SPEED;
err++;
}
if (cmd->convert_arg > MIN_SPEED) {
cmd->convert_arg = MIN_SPEED;
err++;
}
} else {
/* external trigger */
/* see above */
if (cmd->convert_arg > 9) {
cmd->convert_arg = 9;
err++;
}
}
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_COUNT) {
if (cmd->stop_arg > 0x00ffffff) {
cmd->stop_arg = 0x00ffffff;
err++;
}
} else {
/* TRIG_NONE */
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
cb_pcidda_ns_to_timer(&cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
if (tmp != cmd->scan_begin_arg)
err++;
}
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
cb_pcidda_ns_to_timer(&cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
if (tmp != cmd->convert_arg)
err++;
if (cmd->scan_begin_src == TRIG_TIMER &&
cmd->scan_begin_arg <
cmd->convert_arg * cmd->scan_end_arg) {
cmd->scan_begin_arg =
cmd->convert_arg * cmd->scan_end_arg;
err++;
}
}
if (err)
return 4;
return 0;
}
#endif
#if 0
static int cb_pcidda_ns_to_timer(unsigned int *ns, int round)
{
/* trivial timer */
return *ns;
}
#endif
static int cb_pcidda_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int command;
unsigned int channel, range;
channel = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
/* adjust calibration dacs if range has changed */
if (range != devpriv->ao_range[channel])
cb_pcidda_calibrate(dev, channel, range);
/* output channel configuration */
command = NOSU | ENABLEDAC;
/* output channel range */
switch (range) {
case 0:
command |= BIP | RANGE10V;
break;
case 1:
command |= BIP | RANGE5V;
break;
case 2:
command |= BIP | RANGE2V5;
break;
case 3:
command |= UNIP | RANGE10V;
break;
case 4:
command |= UNIP | RANGE5V;
break;
case 5:
command |= UNIP | RANGE2V5;
break;
	}
/* output channel specification */
command |= channel << 2;
outw(command, devpriv->dac + DACONTROL);
/* write data */
outw(data[0], devpriv->dac + DADATA + channel * 2);
/* return the number of samples read/written */
return 1;
}
/* lowlevel read from eeprom */
static unsigned int cb_pcidda_serial_in(struct comedi_device *dev)
{
unsigned int value = 0;
int i;
const int value_width = 16; /* number of bits wide values are */
for (i = 1; i <= value_width; i++) {
/* read bits most significant bit first */
if (inw_p(devpriv->dac + DACALIBRATION1) & SERIAL_OUT_BIT)
value |= 1 << (value_width - i);
}
return value;
}
/* lowlevel write to eeprom/dac */
static void cb_pcidda_serial_out(struct comedi_device *dev, unsigned int value,
unsigned int num_bits)
{
int i;
for (i = 1; i <= num_bits; i++) {
/* send bits most significant bit first */
if (value & (1 << (num_bits - i)))
devpriv->dac_cal1_bits |= SERIAL_IN_BIT;
else
devpriv->dac_cal1_bits &= ~SERIAL_IN_BIT;
outw_p(devpriv->dac_cal1_bits, devpriv->dac + DACALIBRATION1);
}
}
/* reads a 16 bit value from board's eeprom */
static unsigned int cb_pcidda_read_eeprom(struct comedi_device *dev,
unsigned int address)
{
unsigned int i;
unsigned int cal2_bits;
unsigned int value;
const int max_num_caldacs = 4; /* one caldac for every two dac channels */
const int read_instruction = 0x6; /* bits to send to tell eeprom we want to read */
const int instruction_length = 3;
const int address_length = 8;
/* send serial output stream to eeprom */
cal2_bits = SELECT_EEPROM_BIT | DESELECT_REF_DAC_BIT | DUMMY_BIT;
/* deactivate caldacs (one caldac for every two channels) */
for (i = 0; i < max_num_caldacs; i++)
cal2_bits |= DESELECT_CALDAC_BIT(i);
outw_p(cal2_bits, devpriv->dac + DACALIBRATION2);
/* tell eeprom we want to read */
cb_pcidda_serial_out(dev, read_instruction, instruction_length);
/* send address we want to read from */
cb_pcidda_serial_out(dev, address, address_length);
value = cb_pcidda_serial_in(dev);
/* deactivate eeprom */
cal2_bits &= ~SELECT_EEPROM_BIT;
outw_p(cal2_bits, devpriv->dac + DACALIBRATION2);
return value;
}
/* writes to 8 bit calibration dacs */
static void cb_pcidda_write_caldac(struct comedi_device *dev,
unsigned int caldac, unsigned int channel,
unsigned int value)
{
unsigned int cal2_bits;
unsigned int i;
const int num_channel_bits = 3; /* caldacs use 3 bit channel specification */
const int num_caldac_bits = 8; /* 8 bit calibration dacs */
const int max_num_caldacs = 4; /* one caldac for every two dac channels */
/* write 3 bit channel */
cb_pcidda_serial_out(dev, channel, num_channel_bits);
/* write 8 bit caldac value */
cb_pcidda_serial_out(dev, value, num_caldac_bits);
cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT;
/* deactivate caldacs (one caldac for every two channels) */
for (i = 0; i < max_num_caldacs; i++)
cal2_bits |= DESELECT_CALDAC_BIT(i);
/* activate the caldac we want */
cal2_bits &= ~DESELECT_CALDAC_BIT(caldac);
outw_p(cal2_bits, devpriv->dac + DACALIBRATION2);
/* deactivate caldac */
cal2_bits |= DESELECT_CALDAC_BIT(caldac);
outw_p(cal2_bits, devpriv->dac + DACALIBRATION2);
}
/* returns caldac that calibrates given analog out channel */
static unsigned int caldac_number(unsigned int channel)
{
return channel / 2;
}
/* returns caldac channel that provides fine gain for given ao channel */
static unsigned int fine_gain_channel(unsigned int ao_channel)
{
return 4 * (ao_channel % 2);
}
/* returns caldac channel that provides coarse gain for given ao channel */
static unsigned int coarse_gain_channel(unsigned int ao_channel)
{
return 1 + 4 * (ao_channel % 2);
}
/* returns caldac channel that provides coarse offset for given ao channel */
static unsigned int coarse_offset_channel(unsigned int ao_channel)
{
return 2 + 4 * (ao_channel % 2);
}
/* returns caldac channel that provides fine offset for given ao channel */
static unsigned int fine_offset_channel(unsigned int ao_channel)
{
return 3 + 4 * (ao_channel % 2);
}
/* returns eeprom address that provides offset for given ao channel and range */
static unsigned int offset_eeprom_address(unsigned int ao_channel,
unsigned int range)
{
return 0x7 + 2 * range + 12 * ao_channel;
}
/* returns eeprom address that provides gain calibration for given ao channel and range */
static unsigned int gain_eeprom_address(unsigned int ao_channel,
unsigned int range)
{
return 0x8 + 2 * range + 12 * ao_channel;
}
/* returns upper byte of eeprom entry, which gives the coarse adjustment values */
static unsigned int eeprom_coarse_byte(unsigned int word)
{
return (word >> 8) & 0xff;
}
/* returns lower byte of eeprom entry, which gives the fine adjustment values */
static unsigned int eeprom_fine_byte(unsigned int word)
{
return word & 0xff;
}
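/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * a worked example of the channel/range -> caldac/eeprom mapping implemented
 * by the helpers above, for analog output channel 3 in range 1 (bipolar 5V).
 */
#if 0
static void example_calibration_map(void)
{
	unsigned int ch = 3, range = 1;

	printk("caldac %u\n", caldac_number(ch));                      /* 1 */
	printk("coarse offset chan %u\n", coarse_offset_channel(ch));  /* 6 */
	printk("fine offset chan %u\n", fine_offset_channel(ch));      /* 7 */
	printk("coarse gain chan %u\n", coarse_gain_channel(ch));      /* 5 */
	printk("fine gain chan %u\n", fine_gain_channel(ch));          /* 4 */
	printk("offset eeprom addr 0x%x\n", offset_eeprom_address(ch, range)); /* 0x2d */
	printk("gain eeprom addr 0x%x\n", gain_eeprom_address(ch, range));     /* 0x2e */
}
#endif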
/* set caldacs to eeprom values for given channel and range */
static void cb_pcidda_calibrate(struct comedi_device *dev, unsigned int channel,
unsigned int range)
{
unsigned int coarse_offset, fine_offset, coarse_gain, fine_gain;
/* remember range so we can tell when we need to readjust calibration */
devpriv->ao_range[channel] = range;
/* get values from eeprom data */
coarse_offset =
eeprom_coarse_byte(devpriv->eeprom_data
[offset_eeprom_address(channel, range)]);
fine_offset =
eeprom_fine_byte(devpriv->eeprom_data
[offset_eeprom_address(channel, range)]);
coarse_gain =
eeprom_coarse_byte(devpriv->eeprom_data
[gain_eeprom_address(channel, range)]);
fine_gain =
eeprom_fine_byte(devpriv->eeprom_data
[gain_eeprom_address(channel, range)]);
/* set caldacs */
cb_pcidda_write_caldac(dev, caldac_number(channel),
coarse_offset_channel(channel), coarse_offset);
cb_pcidda_write_caldac(dev, caldac_number(channel),
fine_offset_channel(channel), fine_offset);
cb_pcidda_write_caldac(dev, caldac_number(channel),
coarse_gain_channel(channel), coarse_gain);
cb_pcidda_write_caldac(dev, caldac_number(channel),
fine_gain_channel(channel), fine_gain);
}
COMEDI_PCI_INITCLEANUP(driver_cb_pcidda, cb_pcidda_pci_table);
| luckasfb/OT_903D-kernel-2.6.35.7 | kernel/drivers/staging/comedi/drivers/cb_pcidda.c | C | gpl-2.0 | 20,633 |
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2008 VIA Technologies, Inc.
* (Written by Aaron Lwe <aaron.lwe@gmail.com> for VIA)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdint.h>
#include <device/pci_def.h>
#include <device/pci_ids.h>
#include <arch/io.h>
#include <device/pnp_def.h>
#include <console/console.h>
#include <lib.h>
#include <northbridge/via/cn700/raminit.h>
#include <cpu/x86/bist.h>
#include <delay.h>
#include "southbridge/via/vt8237r/early_smbus.c"
#include "southbridge/via/vt8237r/early_serial.c"
#include <spd.h>
static inline int spd_read_byte(unsigned device, unsigned address)
{
return smbus_read_byte(device, address);
}
#include "northbridge/via/cn700/raminit.c"
static void enable_mainboard_devices(void)
{
device_t dev;
dev = pci_locate_device(PCI_ID(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_VT8237R_LPC), 0);
if (dev == PCI_DEV_INVALID)
die("Southbridge not found!!!\n");
/* bit=0 means enable function (per CX700 datasheet)
* 5 16.1 USB 2
* 4 16.0 USB 1
* 3 15.0 SATA and PATA
* 2 16.2 USB 3
* 1 16.4 USB EHCI
*/
pci_write_config8(dev, 0x50, 0x80);
/* bit=1 means enable internal function (per CX700 datasheet)
* 3 Internal RTC
* 2 Internal PS2 Mouse
* 1 Internal KBC Configuration
* 0 Internal Keyboard Controller
*/
pci_write_config8(dev, 0x51, 0x1d);
}
static const struct mem_controller ctrl = {
.d0f0 = 0x0000,
.d0f2 = 0x2000,
.d0f3 = 0x3000,
.d0f4 = 0x4000,
.d0f7 = 0x7000,
.d1f0 = 0x8000,
.channel0 = { DIMM0 },
};
#include <cpu/intel/romstage.h>
void main(unsigned long bist)
{
/* Enable multifunction for northbridge. */
pci_write_config8(ctrl.d0f0, 0x4f, 0x01);
enable_vt8237r_serial();
console_init();
enable_smbus();
smbus_fixup(&ctrl);
report_bist_failure(bist);
enable_mainboard_devices();
ddr_ram_setup(&ctrl);
}
| latelee/coreboot | src/mainboard/via/epia-cn/romstage.c | C | gpl-2.0 | 2,336 |
/* dia -- an diagram creation/manipulation program
* Copyright (C) 1998 Alexander Larsson
*
* dia_svg.c -- Refactoring by Hans Breuer from :
*
* Custom Objects -- objects defined in XML rather than C.
* Copyright (C) 1999 James Henstridge.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include <string.h>
#include <stdlib.h>
#include <pango/pango-attributes.h>
#include "dia_svg.h"
/**
* SECTION:dia_svg
* @title: Dia SVG
* @short_description: A set of function helping to read and write SVG with Dia
*
* The Dia application supports various variants of SVG. There are
* at least two importers of SVG dialects, namely \ref Shapes and
 * the standard SVG importer \ref Plugins. Both use these
 * services to a large extent, but they also do their own
 * thing regarding the SVG dialect interpretation.
*/
/*!
* A global variable to be accessed by "currentColor"
*/
static int _current_color = 0xFF000000;
/*!
* \brief Initialize a style object from another style object or defaults.
* @param gs An SVG style object to initialize.
* @param parent_style An SVG style object to copy values from, or NULL,
* in which case defaults will be used.
* \ingroup DiaSvg
*/
void
dia_svg_style_init(DiaSvgStyle *gs, DiaSvgStyle *parent_style)
{
g_return_if_fail (gs);
gs->stroke = parent_style ? parent_style->stroke : DIA_SVG_COLOUR_DEFAULT;
gs->stroke_opacity = parent_style ? parent_style->stroke_opacity : 1.0;
gs->line_width = parent_style ? parent_style->line_width : 0.0;
gs->linestyle = parent_style ? parent_style->linestyle : DIA_LINE_STYLE_SOLID;
gs->dashlength = parent_style ? parent_style->dashlength : 1;
/* http://www.w3.org/TR/SVG/painting.html#FillProperty - default black
* but we still have to see the difference
*/
gs->fill = parent_style ? parent_style->fill : DIA_SVG_COLOUR_DEFAULT;
gs->fill_opacity = parent_style ? parent_style->fill_opacity : 1.0;
gs->linecap = parent_style ? parent_style->linecap : DIA_LINE_CAPS_DEFAULT;
gs->linejoin = parent_style ? parent_style->linejoin : DIA_LINE_JOIN_DEFAULT;
gs->linestyle = parent_style ? parent_style->linestyle : DIA_LINE_STYLE_DEFAULT;
gs->font = (parent_style && parent_style->font) ? g_object_ref (parent_style->font) : NULL;
gs->font_height = parent_style ? parent_style->font_height : 0.8;
gs->alignment = parent_style ? parent_style->alignment : DIA_ALIGN_LEFT;
gs->stop_color = parent_style ? parent_style->stop_color : 0x000000; /* default black */
gs->stop_opacity = parent_style ? parent_style->stop_opacity : 1.0;
}
/*!
* \brief Copy style values from one SVG style object to another.
* @param dest SVG style object to copy to.
* @param src SVG style object to copy from.
* \ingroup DiaSvg
*/
void
dia_svg_style_copy(DiaSvgStyle *dest, DiaSvgStyle *src)
{
g_return_if_fail (dest && src);
dest->stroke = src->stroke;
dest->stroke_opacity = src->stroke_opacity;
dest->line_width = src->line_width;
dest->linestyle = src->linestyle;
dest->dashlength = src->dashlength;
dest->fill = src->fill;
dest->fill_opacity = src->fill_opacity;
g_clear_object (&dest->font);
dest->font = src->font ? g_object_ref (src->font) : NULL;
dest->font_height = src->font_height;
dest->alignment = src->alignment;
dest->stop_color = src->stop_color;
dest->stop_opacity = src->stop_opacity;
}
static const struct _SvgNamedColor {
const gint value;
const char *name;
} _svg_named_colors [] = {
/* complete list copied from sodipodi source */
{ 0xF0F8FF, "aliceblue" },
{ 0xFAEBD7, "antiquewhite" },
{ 0x00FFFF, "aqua" },
{ 0x7FFFD4, "aquamarine" },
{ 0xF0FFFF, "azure" },
{ 0xF5F5DC, "beige" },
{ 0xFFE4C4, "bisque" },
{ 0x000000, "black" },
{ 0xFFEBCD, "blanchedalmond" },
{ 0x0000FF, "blue" },
{ 0x8A2BE2, "blueviolet" },
{ 0xA52A2A, "brown" },
{ 0xDEB887, "burlywood" },
{ 0x5F9EA0, "cadetblue" },
{ 0x7FFF00, "chartreuse" },
{ 0xD2691E, "chocolate" },
{ 0xFF7F50, "coral" },
{ 0x6495ED, "cornflowerblue" },
{ 0xFFF8DC, "cornsilk" },
{ 0xDC143C, "crimson" },
{ 0x00FFFF, "cyan" },
{ 0x00008B, "darkblue" },
{ 0x008B8B, "darkcyan" },
{ 0xB8860B, "darkgoldenrod" },
{ 0xA9A9A9, "darkgray" },
{ 0x006400, "darkgreen" },
{ 0xA9A9A9, "darkgrey" },
{ 0xBDB76B, "darkkhaki" },
{ 0x8B008B, "darkmagenta" },
{ 0x556B2F, "darkolivegreen" },
{ 0xFF8C00, "darkorange" },
{ 0x9932CC, "darkorchid" },
{ 0x8B0000, "darkred" },
{ 0xE9967A, "darksalmon" },
{ 0x8FBC8F, "darkseagreen" },
{ 0x483D8B, "darkslateblue" },
{ 0x2F4F4F, "darkslategray" },
{ 0x2F4F4F, "darkslategrey" },
{ 0x00CED1, "darkturquoise" },
{ 0x9400D3, "darkviolet" },
{ 0xFF1493, "deeppink" },
{ 0x00BFFF, "deepskyblue" },
{ 0x696969, "dimgray" },
{ 0x696969, "dimgrey" },
{ 0x1E90FF, "dodgerblue" },
{ 0xB22222, "firebrick" },
{ 0xFFFAF0, "floralwhite" },
{ 0x228B22, "forestgreen" },
{ 0xFF00FF, "fuchsia" },
{ 0xDCDCDC, "gainsboro" },
{ 0xF8F8FF, "ghostwhite" },
{ 0xFFD700, "gold" },
{ 0xDAA520, "goldenrod" },
{ 0x808080, "gray" },
{ 0x008000, "green" },
{ 0xADFF2F, "greenyellow" },
{ 0x808080, "grey" },
{ 0xF0FFF0, "honeydew" },
{ 0xFF69B4, "hotpink" },
{ 0xCD5C5C, "indianred" },
{ 0x4B0082, "indigo" },
{ 0xFFFFF0, "ivory" },
{ 0xF0E68C, "khaki" },
{ 0xE6E6FA, "lavender" },
{ 0xFFF0F5, "lavenderblush" },
{ 0x7CFC00, "lawngreen" },
{ 0xFFFACD, "lemonchiffon" },
{ 0xADD8E6, "lightblue" },
{ 0xF08080, "lightcoral" },
{ 0xE0FFFF, "lightcyan" },
{ 0xFAFAD2, "lightgoldenrodyellow" },
{ 0xD3D3D3, "lightgray" },
{ 0x90EE90, "lightgreen" },
{ 0xD3D3D3, "lightgrey" },
{ 0xFFB6C1, "lightpink" },
{ 0xFFA07A, "lightsalmon" },
{ 0x20B2AA, "lightseagreen" },
{ 0x87CEFA, "lightskyblue" },
{ 0x778899, "lightslategray" },
{ 0x778899, "lightslategrey" },
{ 0xB0C4DE, "lightsteelblue" },
{ 0xFFFFE0, "lightyellow" },
{ 0x00FF00, "lime" },
{ 0x32CD32, "limegreen" },
{ 0xFAF0E6, "linen" },
{ 0xFF00FF, "magenta" },
{ 0x800000, "maroon" },
{ 0x66CDAA, "mediumaquamarine" },
{ 0x0000CD, "mediumblue" },
{ 0xBA55D3, "mediumorchid" },
{ 0x9370DB, "mediumpurple" },
{ 0x3CB371, "mediumseagreen" },
{ 0x7B68EE, "mediumslateblue" },
{ 0x00FA9A, "mediumspringgreen" },
{ 0x48D1CC, "mediumturquoise" },
{ 0xC71585, "mediumvioletred" },
{ 0x191970, "midnightblue" },
{ 0xF5FFFA, "mintcream" },
{ 0xFFE4E1, "mistyrose" },
{ 0xFFE4B5, "moccasin" },
{ 0xFFDEAD, "navajowhite" },
{ 0x000080, "navy" },
{ 0xFDF5E6, "oldlace" },
{ 0x808000, "olive" },
{ 0x6B8E23, "olivedrab" },
{ 0xFFA500, "orange" },
{ 0xFF4500, "orangered" },
{ 0xDA70D6, "orchid" },
{ 0xEEE8AA, "palegoldenrod" },
{ 0x98FB98, "palegreen" },
{ 0xAFEEEE, "paleturquoise" },
{ 0xDB7093, "palevioletred" },
{ 0xFFEFD5, "papayawhip" },
{ 0xFFDAB9, "peachpuff" },
{ 0xCD853F, "peru" },
{ 0xFFC0CB, "pink" },
{ 0xDDA0DD, "plum" },
{ 0xB0E0E6, "powderblue" },
{ 0x800080, "purple" },
{ 0xFF0000, "red" },
{ 0xBC8F8F, "rosybrown" },
{ 0x4169E1, "royalblue" },
{ 0x8B4513, "saddlebrown" },
{ 0xFA8072, "salmon" },
{ 0xF4A460, "sandybrown" },
{ 0x2E8B57, "seagreen" },
{ 0xFFF5EE, "seashell" },
{ 0xA0522D, "sienna" },
{ 0xC0C0C0, "silver" },
{ 0x87CEEB, "skyblue" },
{ 0x6A5ACD, "slateblue" },
{ 0x708090, "slategray" },
{ 0x708090, "slategrey" },
{ 0xFFFAFA, "snow" },
{ 0x00FF7F, "springgreen" },
{ 0x4682B4, "steelblue" },
{ 0xD2B48C, "tan" },
{ 0x008080, "teal" },
{ 0xD8BFD8, "thistle" },
{ 0xFF6347, "tomato" },
{ 0x40E0D0, "turquoise" },
{ 0xEE82EE, "violet" },
{ 0xF5DEB3, "wheat" },
{ 0xFFFFFF, "white" },
{ 0xF5F5F5, "whitesmoke" },
{ 0xFFFF00, "yellow" },
{ 0x9ACD32, "yellowgreen" }
};
static int
_cmp_color (const void *key, const void *elem)
{
const char *a = key;
const struct _SvgNamedColor *color = elem;
return strcmp (a, color->name);
}
/*!
* \brief Get an SVG color value by name
*
* The list of named SVG tiny colors has only 17 entries according to
* http://www.w3.org/TR/CSS21/syndata.html#color-units
* Still pango_color_parse() does not support seven of them including
* 'white'. This function supports all of them.
* Against the SVG full specification (and the SVG Test Suite) pango's
* long list is still missing colors, e.g. crimson. So this function is
 * using a supposedly complete internal list.
*
* \ingroup DiaSvg
*/
static gboolean
svg_named_color (const char *name, gint32 *color)
{
const struct _SvgNamedColor *elem;
g_return_val_if_fail (name != NULL && color != NULL, FALSE);
elem = bsearch (name, _svg_named_colors,
G_N_ELEMENTS(_svg_named_colors), sizeof(_svg_named_colors[0]),
_cmp_color);
if (elem) {
*color = elem->value;
return TRUE;
}
return FALSE;
}
/*!
* \brief Parse an SVG color description.
*
* @param color A place to store the color information (0RGB)
* @param str An SVG color description string to parse.
* @return TRUE if parsing was successful.
*
* This function is rather tolerant compared to the SVG specification.
 * It supports special names like 'fg', 'bg', 'foreground', 'background';
 * three numeric representations: '\#FF0000', 'rgb(1.0,0.0,0.0)', 'rgb(100%,0%,0%)'
* and named colors from two domains: SVG and Pango.
*
* \note Shouldn't we use an actual Dia Color object as return value?
* Would require that the DiaSvgStyle object uses that, too. If we did that,
* we could even return the color object directly, and we would be able to use
* >8 bits per channel.
* But we would not be able to handle named colors anymore ...
*
* \ingroup DiaSvg
*/
static gboolean
_parse_color(gint32 *color, const char *str)
{
if (str[0] == '#') {
char *endp = NULL;
guint32 val = strtol(str+1, &endp, 16);
if (endp - (str + 1) > 3) /* no 16-bit color here */
*color = val & 0xffffff;
else /* weak estimation: Pango is reusing bits to fill */
*color = ((0xF & val)<<4) | ((0xF0 & val)<<8) | (((0xF00 & val)>>4)<<16);
} else if (0 == strncmp(str, "none", 4))
*color = DIA_SVG_COLOUR_NONE;
else if (0 == strncmp(str, "foreground", 10) || 0 == strncmp(str, "fg", 2) ||
0 == strncmp(str, "inverse", 7))
*color = DIA_SVG_COLOUR_FOREGROUND;
else if (0 == strncmp(str, "background", 10) || 0 == strncmp(str, "bg", 2) ||
0 == strncmp(str, "default", 7))
*color = DIA_SVG_COLOUR_BACKGROUND;
else if (0 == strcmp(str, "text"))
*color = DIA_SVG_COLOUR_TEXT;
else if (0 == strcmp(str, "currentColor"))
*color = _current_color;
else if (0 == strncmp(str, "rgb(", 4)) {
int r = 0, g = 0, b = 0;
if (3 == sscanf (str+4, "%d,%d,%d", &r, &g, &b)) {
/* Set alpha to 1.0 */
*color = ((0xFF<<24) & 0xFF000000) | ((r<<16) & 0xFF0000) | ((g<<8) & 0xFF00) | (b & 0xFF);
} else if (strchr (str+4, '%')) {
/* e.g. cairo uses percent values */
char **vals = g_strsplit (str+4, "%,", -1);
int i;
*color = 0xFF000000;
for (i = 0; vals[i] && i < 3; ++i)
*color |= ((int)(((255 * g_ascii_strtod(vals[i], NULL)) / 100))<<(16-(8*i)));
g_strfreev (vals);
} else {
return FALSE;
}
} else if (0 == strncmp(str, "rgba(", 5)) {
int r = 0, g = 0, b = 0, a = 0;
if (4 == sscanf (str+4, "%d,%d,%d,%d", &r, &g, &b, &a))
*color = ((a<<24) & 0xFF000000) | ((r<<16) & 0xFF0000) | ((g<<8) & 0xFF00) | (b & 0xFF);
else
return FALSE;
} else {
char* se = strchr (str, ';');
    if (!se) /* style might have trailing space */
se = strchr (str, ' ');
if (!se) {
return svg_named_color (str, color);
} else {
/* need to make a copy of the color only */
gboolean ret;
char *sc = g_strndup (str, se - str);
ret = svg_named_color (sc, color);
g_clear_pointer (&sc, g_free);
return ret;
}
}
return TRUE;
}
/*!
* \brief Convert a string to a color
*
* SVG spec also allows 'inherit' as color value, which leads
* to false here. Should still mostly work because the color
* is supposed to be initialized before.
*
* \ingroup DiaSvg
*/
gboolean
dia_svg_parse_color (const char *str, Color *color)
{
gint32 c;
gboolean ret = _parse_color (&c, str);
if (ret) {
color->red = ((c & 0xff0000) >> 16) / 255.0;
color->green = ((c & 0x00ff00) >> 8) / 255.0;
color->blue = (c & 0x0000ff) / 255.0;
color->alpha = 1.0;
}
return ret;
}
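/*
 * Illustrative sketch (editor's addition): the notations accepted by
 * dia_svg_parse_color() above.  Purely an example, not used in this file.
 */
#if 0
static void example_parse_colors(void)
{
  Color c;

  if (dia_svg_parse_color("#ff8000", &c))         /* hex triplet */
    g_print("r=%g g=%g b=%g\n", c.red, c.green, c.blue);
  if (dia_svg_parse_color("rgb(100%,0%,0%)", &c)) /* percent form */
    g_print("percent rgb parsed\n");
  if (dia_svg_parse_color("crimson", &c))         /* SVG named color */
    g_print("named color parsed\n");
}
#endif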
enum
{
FONT_NAME_LENGTH_MAX = 40
};
static void
_parse_dasharray (DiaSvgStyle *s, double user_scale, char *str, char **end)
{
char *ptr;
/* by also splitting on ';' we can also parse the continued style string */
char **dashes = g_regex_split_simple ("[\\s,;]+", (char *) str, 0, 0);
int n = 0;
double dl;
s->dashlength = g_ascii_strtod(str, &ptr);
if (s->dashlength <= 0.0) {
/* e.g. "none" */
s->linestyle = DIA_LINE_STYLE_SOLID;
} else if (user_scale > 0) {
s->dashlength /= user_scale;
}
if (s->dashlength) { /* at least one value */
while (dashes[n] && g_ascii_strtod (dashes[n], NULL) > 0) {
      ++n; /* Dia cannot do arbitrary lengths; the number of dashes gives the style */
}
}
if (n > 0) {
s->dashlength = g_ascii_strtod (dashes[0], NULL);
}
if (user_scale > 0) {
s->dashlength /= user_scale;
}
switch (n) {
case 0:
s->linestyle = DIA_LINE_STYLE_SOLID;
break;
case 1:
s->linestyle = DIA_LINE_STYLE_DASHED;
break;
case 2:
dl = g_ascii_strtod (dashes[1], NULL);
if (user_scale > 0) {
dl /= user_scale;
}
if (dl < s->line_width || dl > s->dashlength) {
/* the difference is arbitrary */
s->linestyle = DIA_LINE_STYLE_DOTTED;
s->dashlength *= 10.0; /* dot = 10% of len */
} else {
s->linestyle = DIA_LINE_STYLE_DASHED;
}
break;
case 4:
s->linestyle = DIA_LINE_STYLE_DASH_DOT;
break;
default :
/* If an odd number of values is provided, then the list of values is repeated to
* yield an even number of values. Thus, stroke-dasharray: 5,3,2 is equivalent to
* stroke-dasharray: 5,3,2,5,3,2.
*/
case 6:
s->linestyle = DIA_LINE_STYLE_DASH_DOT_DOT;
break;
}
g_strfreev (dashes);
if (end)
*end = ptr;
}
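/*
 * Illustrative sketch (editor's addition): how a few dasharray strings map to
 * Dia line styles in _parse_dasharray() above (user_scale <= 0, so no
 * rescaling; the values are made up).
 */
#if 0
static void example_dasharray(void)
{
  DiaSvgStyle s;
  char one[] = "1.0", dot[] = "0.1 2.0", dashdot[] = "1.0 0.4 0.2 0.4";

  dia_svg_style_init(&s, NULL);
  s.line_width = 0.1;
  _parse_dasharray(&s, -1.0, one, NULL);     /* one value  -> DIA_LINE_STYLE_DASHED */
  _parse_dasharray(&s, -1.0, dot, NULL);     /* 2nd value > dashlength -> DOTTED */
  _parse_dasharray(&s, -1.0, dashdot, NULL); /* four values -> DASH_DOT */
}
#endif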
static void
_parse_linejoin (DiaSvgStyle *s, const char *val)
{
if (!strncmp (val, "miter", 5)) {
s->linejoin = DIA_LINE_JOIN_MITER;
} else if (!strncmp (val, "round", 5)) {
s->linejoin = DIA_LINE_JOIN_ROUND;
} else if (!strncmp (val, "bevel", 5)) {
s->linejoin = DIA_LINE_JOIN_BEVEL;
} else if (!strncmp (val, "default", 7)) {
s->linejoin = DIA_LINE_JOIN_DEFAULT;
}
}
static void
_parse_linecap (DiaSvgStyle *s, const char *val)
{
if (!strncmp(val, "butt", 4))
s->linecap = DIA_LINE_CAPS_BUTT;
else if (!strncmp(val, "round", 5))
s->linecap = DIA_LINE_CAPS_ROUND;
else if (!strncmp(val, "square", 6) || !strncmp(val, "projecting", 10))
s->linecap = DIA_LINE_CAPS_PROJECTING;
else if (!strncmp(val, "default", 7))
s->linecap = DIA_LINE_CAPS_DEFAULT;
}
/*!
* \brief Given any of the three parameters adjust the font
* @param s Dia SVG style to modify
* @param family comma-separated list of family names
* @param style font slant string
* @param weight font weight string
*/
static void
_style_adjust_font (DiaSvgStyle *s, const char *family, const char *style, const char *weight)
{
g_clear_object (&s->font);
/* given font_height is bogus, especially if not given at all
* or without unit ... see bug 665648 about invalid CSS
*/
s->font = dia_font_new_from_style (DIA_FONT_SANS, s->font_height > 0 ? s->font_height : 1.0);
if (family) {
/* SVG allows a list of families here, also there is some strange formatting
   * seen, like 'Arial'. If the given family name cannot be resolved by
   * Pango it complains loudly with g_warning().
*/
char **families = g_strsplit (family, ",", -1);
int i = 0;
gboolean found = FALSE;
while (!found && families[i]) {
const char *chomped = g_strchomp (g_strdelimit (families[i], "'", ' '));
PangoFont *loaded;
dia_font_set_any_family(s->font, chomped);
loaded = pango_context_load_font (dia_font_get_context (),
dia_font_get_description (s->font));
if (loaded) {
g_clear_object (&loaded);
found = TRUE;
}
++i;
}
if (!found) {
dia_font_set_any_family (s->font, "sans");
}
g_strfreev (families);
}
if (style) {
dia_font_set_slant_from_string (s->font, style);
}
if (weight) {
dia_font_set_weight_from_string (s->font, weight);
}
}
static void
_parse_text_align (DiaSvgStyle *s, const char *ptr)
{
if (!strncmp (ptr, "start", 5)) {
s->alignment = DIA_ALIGN_LEFT;
} else if (!strncmp (ptr, "end", 3)) {
s->alignment = DIA_ALIGN_RIGHT;
} else if (!strncmp (ptr, "middle", 6)) {
s->alignment = DIA_ALIGN_CENTRE;
}
}
/*!
* \brief Parse SVG/CSS style string
*
* Parse as much information from the given style string as Dia can handle.
* There are still known limitations:
* - does not follow references to somewhere inside or outside the string
* (e.g. url(...), color and currentColor)
* - font parsing should be extended to support lists of fonts somehow
*
* \ingroup DiaSvg
*/
void
dia_svg_parse_style_string (DiaSvgStyle *s, double user_scale, const char *str)
{
int i = 0;
char *ptr = (char *) str;
char *family = NULL, *style = NULL, *weight = NULL;
while (ptr[0] != '\0') {
/* skip white space at start */
while (ptr[0] != '\0' && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
if (!strncmp("font-family:", ptr, 12)) {
ptr += 12;
while ((ptr[0] != '\0') && g_ascii_isspace(ptr[0])) ptr++;
i = 0;
while (ptr[i] != '\0' && ptr[i] != ';') ++i;
/* with i==0 we fall back to 'sans' too */
if (strncmp (ptr, "sanserif", i) == 0 || strncmp (ptr, "sans-serif", i) == 0)
family = g_strdup ("sans"); /* special name adaption */
else
family = i > 0 ? g_strndup(ptr, i) : NULL;
ptr += i;
} else if (!strncmp("font-weight:", ptr, 12)) {
ptr += 12;
while ((ptr[0] != '\0') && g_ascii_isspace(ptr[0])) ptr++;
i = 0;
while (ptr[i] != '\0' && ptr[i] != ';') ++i;
weight = i > 0 ? g_strndup (ptr, i) : NULL;
ptr += i;
} else if (!strncmp("font-style:", ptr, 11)) {
ptr += 11;
while ((ptr[0] != '\0') && g_ascii_isspace(ptr[0])) ptr++;
i = 0;
while (ptr[i] != '\0' && ptr[i] != ';') ++i;
style = i > 0 ? g_strndup(ptr, i) : NULL;
ptr += i;
} else if (!strncmp("font-size:", ptr, 10)) {
ptr += 10;
while ((ptr[0] != '\0') && g_ascii_isspace(ptr[0])) ptr++;
i = 0;
while (ptr[i] != '\0' && ptr[i] != ';') ++i;
s->font_height = g_ascii_strtod(ptr, NULL);
ptr += i;
if (user_scale > 0)
s->font_height /= user_scale;
} else if (!strncmp("text-anchor:", ptr, 12)) {
ptr += 12;
while ((ptr[0] != '\0') && g_ascii_isspace(ptr[0])) ptr++;
_parse_text_align(s, ptr);
} else if (!strncmp("stroke-width:", ptr, 13)) {
ptr += 13;
s->line_width = g_ascii_strtod(ptr, &ptr);
if (user_scale > 0)
s->line_width /= user_scale;
} else if (!strncmp("stroke:", ptr, 7)) {
ptr += 7;
while ((ptr[0] != '\0') && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
if (!_parse_color (&s->stroke, ptr))
s->stroke = DIA_SVG_COLOUR_NONE;
} else if (!strncmp("stroke-opacity:", ptr, 15)) {
ptr += 15;
s->stroke_opacity = g_ascii_strtod(ptr, &ptr);
} else if (!strncmp("fill:", ptr, 5)) {
ptr += 5;
while (ptr[0] != '\0' && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
if (!_parse_color (&s->fill, ptr))
s->fill = DIA_SVG_COLOUR_NONE;
} else if (!strncmp("fill-opacity:", ptr, 13)) {
ptr += 13;
s->fill_opacity = g_ascii_strtod(ptr, &ptr);
} else if (!strncmp("stop-color:", ptr, 11)) {
ptr += 11;
while (ptr[0] != '\0' && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
if (!_parse_color (&s->stop_color, ptr))
s->stop_color = DIA_SVG_COLOUR_NONE;
} else if (!strncmp("stop-opacity:", ptr, 13)) {
ptr += 13;
s->stop_opacity = g_ascii_strtod(ptr, &ptr);
} else if (!strncmp("opacity", ptr, 7)) {
double opacity;
ptr += 7;
opacity = g_ascii_strtod(ptr, &ptr);
/* multiplicative effect of opacity */
s->stroke_opacity *= opacity;
s->fill_opacity *= opacity;
} else if (!strncmp("stroke-linecap:", ptr, 15)) {
ptr += 15;
while (ptr[0] != '\0' && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
_parse_linecap (s, ptr);
} else if (!strncmp("stroke-linejoin:", ptr, 16)) {
ptr += 16;
while (ptr[0] != '\0' && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
_parse_linejoin (s, ptr);
} else if (!strncmp("stroke-pattern:", ptr, 15)) {
      /* Apparently not an official SVG style attribute, but
       * referenced in the custom-shapes document. So we continue
* supporting it (read only).
*/
ptr += 15;
while (ptr[0] != '\0' && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
if (!strncmp (ptr, "solid", 5)) {
s->linestyle = DIA_LINE_STYLE_SOLID;
} else if (!strncmp (ptr, "dashed", 6)) {
s->linestyle = DIA_LINE_STYLE_DASHED;
} else if (!strncmp (ptr, "dash-dot", 8)) {
s->linestyle = DIA_LINE_STYLE_DASH_DOT;
} else if (!strncmp (ptr, "dash-dot-dot", 12)) {
s->linestyle = DIA_LINE_STYLE_DASH_DOT_DOT;
} else if (!strncmp (ptr, "dotted", 6)) {
s->linestyle = DIA_LINE_STYLE_DOTTED;
} else if (!strncmp (ptr, "default", 7)) {
s->linestyle = DIA_LINE_STYLE_DEFAULT;
}
/* XXX: deal with a real pattern */
} else if (!strncmp ("stroke-dashlength:", ptr, 18)) {
ptr += 18;
while (ptr[0] != '\0' && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
if (!strncmp(ptr, "default", 7))
s->dashlength = 1.0;
else {
s->dashlength = g_ascii_strtod(ptr, &ptr);
if (user_scale > 0)
s->dashlength /= user_scale;
}
} else if (!strncmp ("stroke-dasharray:", ptr, 17)) {
s->linestyle = DIA_LINE_STYLE_DASHED;
ptr += 17;
while (ptr[0] != '\0' && g_ascii_isspace(ptr[0])) ptr++;
if (ptr[0] == '\0') break;
if (!strncmp(ptr, "default", 7))
s->dashlength = 1.0;
else
_parse_dasharray (s, user_scale, ptr, &ptr);
}
/* skip up to the next attribute */
while (ptr[0] != '\0' && ptr[0] != ';' && ptr[0] != '\n') ptr++;
if (ptr[0] != '\0') ptr++;
}
if (family || style || weight) {
_style_adjust_font (s, family, style, weight);
g_clear_pointer (&family, g_free);
g_clear_pointer (&style, g_free);
g_clear_pointer (&weight, g_free);
}
}
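/*
 * Illustrative sketch (editor's addition): feeding a raw style attribute
 * value through dia_svg_parse_style_string().  The style string is made up.
 */
#if 0
static void example_parse_style_string(void)
{
  DiaSvgStyle gs;

  dia_svg_style_init(&gs, NULL);
  dia_svg_parse_style_string(&gs, 1.0 /* user_scale */,
                             "fill:#ff0000; stroke:none; stroke-width:0.1");
  /* gs.fill == 0xff0000, gs.stroke == DIA_SVG_COLOUR_NONE, gs.line_width == 0.1 */
}
#endif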
/*!
* \brief Parse SVG style properties
*
* This function not only parses the style attribute of the given node
* it also extracts some of the style properties directly.
* @param node An XML node to parse a style from.
* @param s The SVG style object to fill out. This should previously be
* initialized to some default values.
* @param user_scale if >0 scalable values (font-size, stroke-width, ...)
* are divided by this, otherwise ignored
*
* \ingroup DiaSvg
*/
void
dia_svg_parse_style (xmlNodePtr node, DiaSvgStyle *s, double user_scale)
{
xmlChar *str;
str = xmlGetProp(node, (const xmlChar *)"style");
if (str) {
dia_svg_parse_style_string (s, user_scale, (char *)str);
xmlFree(str);
}
/* ugly svg variations, it is allowed to give style properties without
* the style attribute, i.e. direct attributes
*/
str = xmlGetProp(node, (const xmlChar *)"color");
if (str) {
int c;
if (_parse_color (&c, (char *) str))
_current_color = c;
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"opacity");
if (str) {
double opacity = g_ascii_strtod ((char *) str, NULL);
/* multiplicative effect of opacity */
s->stroke_opacity *= opacity;
s->fill_opacity *= opacity;
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"stop-color");
if (str) {
if (!_parse_color (&s->stop_color, (char *) str) && strcmp ((const char *) str, "inherit") != 0)
s->stop_color = DIA_SVG_COLOUR_NONE;
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"stop-opacity");
if (str) {
s->stop_opacity = g_ascii_strtod((char *) str, NULL);
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"fill");
if (str) {
if (!_parse_color (&s->fill, (char *) str) && strcmp ((const char *) str, "inherit") != 0)
s->fill = DIA_SVG_COLOUR_NONE;
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"fill-opacity");
if (str) {
s->fill_opacity = g_ascii_strtod((char *) str, NULL);
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"stroke");
if (str) {
if (!_parse_color (&s->stroke, (char *) str) && strcmp ((const char *) str, "inherit") != 0)
s->stroke = DIA_SVG_COLOUR_NONE;
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"stroke-opacity");
if (str) {
s->stroke_opacity = g_ascii_strtod((char *) str, NULL);
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"stroke-width");
if (str) {
s->line_width = g_ascii_strtod((char *) str, NULL);
xmlFree(str);
if (user_scale > 0)
s->line_width /= user_scale;
}
str = xmlGetProp(node, (const xmlChar *)"stroke-dasharray");
if (str) {
if (strcmp ((const char *)str, "inherit") != 0)
_parse_dasharray (s, user_scale, (char *)str, NULL);
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"stroke-linejoin");
if (str) {
if (strcmp ((const char *)str, "inherit") != 0)
_parse_linejoin (s, (char *)str);
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"stroke-linecap");
if (str) {
if (strcmp ((const char *)str, "inherit") != 0)
_parse_linecap (s, (char *)str);
xmlFree(str);
}
/* text-props, again ;( */
str = xmlGetProp(node, (const xmlChar *)"font-size");
if (str) {
/* for inherit we just leave the original value,
* should be initialized by parent style already
*/
if (strcmp ((const char *)str, "inherit") != 0) {
s->font_height = g_ascii_strtod ((char *)str, NULL);
if (user_scale > 0)
s->font_height /= user_scale;
}
xmlFree(str);
}
str = xmlGetProp(node, (const xmlChar *)"text-anchor");
if (str) {
_parse_text_align (s, (const char*) str);
xmlFree(str);
}
{
xmlChar *family = xmlGetProp(node, (const xmlChar *)"font-family");
xmlChar *slant = xmlGetProp(node, (const xmlChar *)"font-style");
xmlChar *weight = xmlGetProp(node, (const xmlChar *)"font-weight");
if (family || slant || weight) {
_style_adjust_font (s, (char *)family, (char *)slant, (char *)weight);
if (family)
xmlFree(family);
if (slant)
xmlFree(slant);
if (weight)
xmlFree(weight);
}
}
}
/**
* _path_arc_segment:
* @points: destination array of #BezPoint
* @xc: center x
* @yc: center y
* @th0: first angle
* @th1: second angle
* @rx: radius x
* @ry: radius y
* @x_axis_rotation: rotation of the axis
* @last_p2: the resulting current point
*
* Parse a SVG description of an arc segment.
*
* Code stolen from (and adapted)
* http://www.inkscape.org/doc/doxygen/html/svg-path_8cpp.php#a7
* which may have got it from rsvg, hope it is correct ;)
*
* If you want the description of the algorithm read the SVG specs:
* http://www.w3.org/TR/SVG/paths.html#PathDataEllipticalArcCommands
*/
static void
_path_arc_segment (GArray *points,
double xc,
double yc,
double th0,
double th1,
double rx,
double ry,
double x_axis_rotation,
Point *last_p2)
{
BezPoint bez;
double sin_th, cos_th;
double a00, a01, a10, a11;
double x1, y1, x2, y2, x3, y3;
double t;
double th_half;
sin_th = sin (x_axis_rotation * (M_PI / 180.0));
cos_th = cos (x_axis_rotation * (M_PI / 180.0));
/* inverse transform compared with rsvg_path_arc */
a00 = cos_th * rx;
a01 = -sin_th * ry;
a10 = sin_th * rx;
a11 = cos_th * ry;
th_half = 0.5 * (th1 - th0);
t = (8.0 / 3.0) * sin(th_half * 0.5) * sin(th_half * 0.5) / sin(th_half);
x1 = xc + cos (th0) - t * sin (th0);
y1 = yc + sin (th0) + t * cos (th0);
x3 = xc + cos (th1);
y3 = yc + sin (th1);
x2 = x3 + t * sin (th1);
y2 = y3 - t * cos (th1);
bez.type = BEZ_CURVE_TO;
bez.p1.x = a00 * x1 + a01 * y1;
bez.p1.y = a10 * x1 + a11 * y1;
bez.p2.x = a00 * x2 + a01 * y2;
bez.p2.y = a10 * x2 + a11 * y2;
bez.p3.x = a00 * x3 + a01 * y3;
bez.p3.y = a10 * x3 + a11 * y3;
*last_p2 = bez.p2;
g_array_append_val(points, bez);
}
/*
* Parse an SVG description of a full arc.
*/
static void
_path_arc (GArray *points,
double cpx,
double cpy,
double rx,
double ry,
double x_axis_rotation,
int large_arc_flag,
int sweep_flag,
double x,
double y,
Point *last_p2)
{
double sin_th, cos_th;
double a00, a01, a10, a11;
double x0, y0, x1, y1, xc, yc;
double d, sfactor, sfactor_sq;
double th0, th1, th_arc;
double px, py, pl;
int i, n_segs;
sin_th = sin (x_axis_rotation * (M_PI / 180.0));
cos_th = cos (x_axis_rotation * (M_PI / 180.0));
/*
* Correction of out-of-range radii as described in Appendix F.6.6:
*
* 1. Ensure radii are non-zero (Done?).
* 2. Ensure that radii are positive.
* 3. Ensure that radii are large enough.
*/
if(rx < 0.0) rx = -rx;
if(ry < 0.0) ry = -ry;
px = cos_th * (cpx - x) * 0.5 + sin_th * (cpy - y) * 0.5;
py = cos_th * (cpy - y) * 0.5 - sin_th * (cpx - x) * 0.5;
pl = (px * px) / (rx * rx) + (py * py) / (ry * ry);
if(pl > 1.0)
{
pl = sqrt(pl);
rx *= pl;
ry *= pl;
}
/* Proceed with computations as described in Appendix F.6.5 */
a00 = cos_th / rx;
a01 = sin_th / rx;
a10 = -sin_th / ry;
a11 = cos_th / ry;
x0 = a00 * cpx + a01 * cpy;
y0 = a10 * cpx + a11 * cpy;
x1 = a00 * x + a01 * y;
y1 = a10 * x + a11 * y;
/* (x0, y0) is current point in transformed coordinate space.
(x1, y1) is new point in transformed coordinate space.
The arc fits a unit-radius circle in this space.
*/
d = (x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0);
sfactor_sq = 1.0 / d - 0.25;
if (sfactor_sq < 0) sfactor_sq = 0;
sfactor = sqrt (sfactor_sq);
if (sweep_flag == large_arc_flag) sfactor = -sfactor;
xc = 0.5 * (x0 + x1) - sfactor * (y1 - y0);
yc = 0.5 * (y0 + y1) + sfactor * (x1 - x0);
/* (xc, yc) is center of the circle. */
th0 = atan2 (y0 - yc, x0 - xc);
th1 = atan2 (y1 - yc, x1 - xc);
th_arc = th1 - th0;
if (th_arc < 0 && sweep_flag)
th_arc += 2 * M_PI;
else if (th_arc > 0 && !sweep_flag)
th_arc -= 2 * M_PI;
n_segs = (int) ceil (fabs (th_arc / (M_PI * 0.5 + 0.001)));
for (i = 0; i < n_segs; i++) {
_path_arc_segment(points, xc, yc,
th0 + i * th_arc / n_segs,
th0 + (i + 1) * th_arc / n_segs,
rx, ry, x_axis_rotation,
last_p2);
}
}
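/*
 * Illustrative sketch (editor's addition): converting one SVG arc command,
 * roughly "M 0,0 A 10,10 0 0 1 10,10", into bezier segments via _path_arc().
 * The numbers are made up for illustration only.
 */
#if 0
static void example_arc_to_beziers(void)
{
  GArray *points = g_array_new(FALSE, FALSE, sizeof(BezPoint));
  Point last_p2;

  /* current point (0,0); rx=ry=10, no axis rotation, small arc, positive
   * sweep, end point (10,10) -> appends BEZ_CURVE_TO segments for the arc */
  _path_arc(points, 0.0, 0.0, 10.0, 10.0, 0.0, 0, 1, 10.0, 10.0, &last_p2);

  g_array_free(points, TRUE);
}
#endif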
/* routine to chomp off the start of the string */
#define path_chomp(path) while (path[0]!='\0'&&strchr(" \t\n\r,", path[0])) path++
/**
* dia_svg_parse_path:
* @path_str: A string describing an SVG path.
* @unparsed: The position in @path_str where parsing ended, or %NULL if
* the string was completely parsed. This should be used for
* calling the function until it is fully parsed.
* @closed: Whether the path was closed.
* @current_point: to retain it over splitting
*
* Takes SVG path content and converts it in an array of BezPoint.
*
 * SVG paths can contain multiple MOVE_TO commands while Dia's bezier
 * object can only contain one, so you may need to call this function
* multiple times.
*
 * @bug This function is way too long (324 lines). So don't touch it, please!
* Shouldn't we try to turn straight lines, simple arc, polylines and
* zigzaglines into their appropriate objects? Could either be done by
* returning an object or by having functions that try parsing as
* specific simple paths.
* NOPE: Dia is capable to handle beziers and the file has given us some so
* WHY should be break it in to pieces ???
*
* Returns: %TRUE if there is any useful data in parsed to points
*/
gboolean
dia_svg_parse_path (GArray *points,
const char *path_str,
char **unparsed,
gboolean *closed,
Point *current_point)
{
enum {
PATH_MOVE, PATH_LINE, PATH_HLINE, PATH_VLINE, PATH_CURVE,
PATH_SMOOTHCURVE, PATH_QUBICCURVE, PATH_TTQCURVE,
PATH_ARC, PATH_CLOSE, PATH_END } last_type = PATH_MOVE;
Point last_open = {0.0, 0.0};
Point last_point = {0.0, 0.0};
Point last_control = {0.0, 0.0};
gboolean last_relative = FALSE;
BezPoint bez = { 0, };
char *path = (char *)path_str;
gboolean need_next_element = FALSE;
/* we can grow the same array in multiple steps */
gsize points_at_start = points->len;
*closed = FALSE;
*unparsed = NULL;
  /* when splitting into pieces, we have to maintain current_point across them */
if (current_point)
last_point = *current_point;
path_chomp(path);
while (path[0] != '\0') {
#ifdef DEBUG_CUSTOM
g_printerr ("Path: %s\n", path);
#endif
/* check for a new command */
switch (path[0]) {
case 'M':
if (points->len - points_at_start > 0) {
need_next_element = TRUE;
goto MORETOPARSE;
}
path++;
path_chomp(path);
last_type = PATH_MOVE;
last_relative = FALSE;
break;
case 'm':
if (points->len - points_at_start > 0) {
need_next_element = TRUE;
goto MORETOPARSE;
}
path++;
path_chomp(path);
last_type = PATH_MOVE;
last_relative = TRUE;
break;
case 'L':
path++;
path_chomp(path);
last_type = PATH_LINE;
last_relative = FALSE;
break;
case 'l':
path++;
path_chomp(path);
last_type = PATH_LINE;
last_relative = TRUE;
break;
case 'H':
path++;
path_chomp(path);
last_type = PATH_HLINE;
last_relative = FALSE;
break;
case 'h':
path++;
path_chomp(path);
last_type = PATH_HLINE;
last_relative = TRUE;
break;
case 'V':
path++;
path_chomp(path);
last_type = PATH_VLINE;
last_relative = FALSE;
break;
case 'v':
path++;
path_chomp(path);
last_type = PATH_VLINE;
last_relative = TRUE;
break;
case 'C':
path++;
path_chomp(path);
last_type = PATH_CURVE;
last_relative = FALSE;
break;
case 'c':
path++;
path_chomp(path);
last_type = PATH_CURVE;
last_relative = TRUE;
break;
case 'S':
path++;
path_chomp(path);
last_type = PATH_SMOOTHCURVE;
last_relative = FALSE;
break;
case 's':
path++;
path_chomp(path);
last_type = PATH_SMOOTHCURVE;
last_relative = TRUE;
break;
case 'q':
path++;
path_chomp(path);
last_type = PATH_QUBICCURVE;
last_relative = TRUE;
break;
case 'Q':
path++;
path_chomp(path);
last_type = PATH_QUBICCURVE;
last_relative = FALSE;
break;
case 't':
path++;
path_chomp(path);
last_type = PATH_TTQCURVE;
last_relative = TRUE;
break;
case 'T':
path++;
path_chomp(path);
last_type = PATH_TTQCURVE;
last_relative = FALSE;
break;
case 'Z':
case 'z':
path++;
path_chomp(path);
last_type = PATH_CLOSE;
last_relative = FALSE;
break;
case 'A':
path++;
path_chomp(path);
last_type = PATH_ARC;
last_relative = FALSE;
break;
case 'a':
path++;
path_chomp(path);
last_type = PATH_ARC;
last_relative = TRUE;
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case '.':
case '+':
case '-':
if (last_type == PATH_CLOSE) {
g_warning("parse_path: argument given for implicite close path");
/* consume one number so we don't fall into an infinite loop */
while (path != NULL && strchr("0123456789.+-", path[0])) path++;
path_chomp(path);
*closed = TRUE;
need_next_element = TRUE;
goto MORETOPARSE;
}
break;
default:
g_warning("unsupported path code '%c'", path[0]);
last_type = PATH_END;
path++;
path_chomp(path);
break;
}
/* actually parse the path component */
switch (last_type) {
case PATH_MOVE:
if (points->len - points_at_start > 1) {
g_warning ("Only first point should be 'move'");
}
bez.type = BEZ_MOVE_TO;
bez.p1.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p1.y = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
bez.p1.x += last_point.x;
bez.p1.y += last_point.y;
}
last_point = bez.p1;
last_control = bez.p1;
last_open = bez.p1;
if (points->len - points_at_start == 1) {
/* stupid svg, but we can handle it */
g_array_index (points, BezPoint, 0) = bez;
} else {
g_array_append_val (points, bez);
}
/* [SVG11 8.3.2] If a moveto is followed by multiple pairs of coordinates,
* the subsequent pairs are treated as implicit lineto commands
*/
last_type = PATH_LINE;
break;
case PATH_LINE:
bez.type = BEZ_LINE_TO;
bez.p1.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p1.y = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
bez.p1.x += last_point.x;
bez.p1.y += last_point.y;
}
      /* Strictly speaking it should not be necessary to assign the other
* two points. But it helps hiding a serious limitation with the
* standard bezier serialization, namely only saving one move-to
* and the rest as curve-to */
#define INIT_LINE_TO_AS_CURVE_TO bez.p3 = bez.p1; bez.p2 = last_point
INIT_LINE_TO_AS_CURVE_TO;
last_point = bez.p1;
last_control = bez.p1;
g_array_append_val (points, bez);
break;
case PATH_HLINE:
bez.type = BEZ_LINE_TO;
bez.p1.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p1.y = last_point.y;
if (last_relative) {
bez.p1.x += last_point.x;
}
INIT_LINE_TO_AS_CURVE_TO;
last_point = bez.p1;
last_control = bez.p1;
g_array_append_val (points, bez);
break;
case PATH_VLINE:
bez.type = BEZ_LINE_TO;
bez.p1.x = last_point.x;
bez.p1.y = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
bez.p1.y += last_point.y;
}
INIT_LINE_TO_AS_CURVE_TO;
#undef INIT_LINE_TO_AS_CURVE_TO
last_point = bez.p1;
last_control = bez.p1;
g_array_append_val (points, bez);
break;
case PATH_CURVE:
bez.type = BEZ_CURVE_TO;
bez.p1.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p1.y = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p2.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p2.y = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p3.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p3.y = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
bez.p1.x += last_point.x;
bez.p1.y += last_point.y;
bez.p2.x += last_point.x;
bez.p2.y += last_point.y;
bez.p3.x += last_point.x;
bez.p3.y += last_point.y;
}
last_point = bez.p3;
last_control = bez.p2;
g_array_append_val (points, bez);
break;
case PATH_SMOOTHCURVE:
bez.type = BEZ_CURVE_TO;
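      /* 'S'/'s': per SVG 1.1, 8.3.6 the first control point is the reflection
       * of the previous segment's second control point about the current
       * point; if the previous command was not a curve, last_control equals
       * last_point and the reflection degenerates to the current point. */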
bez.p1.x = 2 * last_point.x - last_control.x;
bez.p1.y = 2 * last_point.y - last_control.y;
bez.p2.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p2.y = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p3.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p3.y = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
bez.p2.x += last_point.x;
bez.p2.y += last_point.y;
bez.p3.x += last_point.x;
bez.p3.y += last_point.y;
}
last_point = bez.p3;
last_control = bez.p2;
g_array_append_val (points, bez);
break;
case PATH_QUBICCURVE: {
/* raise quadratic bezier to cubic (copied from librsvg) */
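        /* Degree elevation: a quadratic segment with control point (x1,y1)
         * becomes the cubic with P1 = P0 + 2/3*(Q - P0) and
         * P2 = P3 + 2/3*(Q - P3), i.e. the (p + 2*q)/3 expressions below. */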
double x1, y1;
x1 = g_ascii_strtod (path, &path);
path_chomp (path);
y1 = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
x1 += last_point.x;
y1 += last_point.y;
}
bez.type = BEZ_CURVE_TO;
bez.p1.x = (last_point.x + 2 * x1) * (1.0 / 3.0);
bez.p1.y = (last_point.y + 2 * y1) * (1.0 / 3.0);
bez.p3.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p3.y = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
bez.p3.x += last_point.x;
bez.p3.y += last_point.y;
}
bez.p2.x = (bez.p3.x + 2 * x1) * (1.0 / 3.0);
bez.p2.y = (bez.p3.y + 2 * y1) * (1.0 / 3.0);
last_point = bez.p3;
last_control.x = x1;
last_control.y = y1;
g_array_append_val (points, bez);
}
break;
case PATH_TTQCURVE:
{
/* Truetype quadratic bezier curveto */
double xc, yc; /* quadratic control point */
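        /* 'T'/'t': the implied quadratic control point is the reflection of
         * the previous quadratic control point about the current point; the
         * same degree elevation as for 'Q' then turns it into a cubic. */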
xc = 2 * last_point.x - last_control.x;
yc = 2 * last_point.y - last_control.y;
/* generate a quadratic bezier with control point = xc, yc */
bez.type = BEZ_CURVE_TO;
bez.p1.x = (last_point.x + 2 * xc) * (1.0 / 3.0);
bez.p1.y = (last_point.y + 2 * yc) * (1.0 / 3.0);
bez.p3.x = g_ascii_strtod (path, &path);
path_chomp (path);
bez.p3.y = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
bez.p3.x += last_point.x;
bez.p3.y += last_point.y;
}
bez.p2.x = (bez.p3.x + 2 * xc) * (1.0 / 3.0);
bez.p2.y = (bez.p3.y + 2 * yc) * (1.0 / 3.0);
last_point = bez.p3;
last_control.x = xc;
last_control.y = yc;
g_array_append_val (points, bez);
}
break;
case PATH_ARC:
{
double rx, ry;
double xrot;
int largearc, sweep;
Point dest, dest_c;
dest_c.x=0;
dest_c.y=0;
rx = g_ascii_strtod (path, &path);
path_chomp (path);
ry = g_ascii_strtod (path, &path);
path_chomp (path);
#if 1 /* ok if it is all properly separated */
xrot = g_ascii_strtod (path, &path);
path_chomp (path);
largearc = (int) g_ascii_strtod (path, &path);
path_chomp (path);
sweep = (int) g_ascii_strtod (path, &path);
path_chomp (path);
#else
        /* Actually three flags, which might not be properly separated,
         * but even with this, paths-data-20-f.svg does not work. IMHO the
         * test case is seriously borked and can only pass if parsing of
         * the arc is tweaked against the test. In other words, that test
         * looks like it is built against one specific implementation.
         * Inkscape and librsvg fail; Firefox passes.
         */
xrot = path[0] == '0' ? 0.0 : 1.0; ++path;
path_chomp(path);
largearc = path[0] == '0' ? 0 : 1; ++path;
path_chomp(path);
sweep = path[0] == '0' ? 0 : 1; ++path;
path_chomp(path);
#endif
dest.x = g_ascii_strtod (path, &path);
path_chomp (path);
dest.y = g_ascii_strtod (path, &path);
path_chomp (path);
if (last_relative) {
dest.x += last_point.x;
dest.y += last_point.y;
}
        /* avoid matherr with bogus values - just ignore them;
         * this does happen e.g. with 'Chem-Widgets - clamp-large'
         */
if (last_point.x != dest.x || last_point.y != dest.y) {
_path_arc (points, last_point.x, last_point.y,
rx, ry, xrot, largearc, sweep, dest.x, dest.y,
&dest_c);
}
last_point = dest;
last_control = dest_c;
}
break;
case PATH_CLOSE:
/* close the path with a line - second condition to ignore single close */
if (!*closed && (points->len != points_at_start)) {
const BezPoint *bpe = &g_array_index (points, BezPoint, points->len-1);
        /* if the last point already meets the first point, don't add it again */
const Point pte = bpe->type == BEZ_CURVE_TO ? bpe->p3 : bpe->p1;
if (pte.x != last_open.x || pte.y != last_open.y) {
bez.type = BEZ_LINE_TO;
bez.p1 = last_open;
g_array_append_val (points, bez);
}
last_point = last_open;
}
*closed = TRUE;
need_next_element = TRUE;
break;
case PATH_END:
while (*path != '\0') {
path++;
}
need_next_element = FALSE;
break;
default:
g_return_val_if_reached (FALSE);
}
/* get rid of any ignorable characters */
path_chomp (path);
MORETOPARSE:
if (need_next_element) {
/* check if there really is more to be parsed */
if (path[0] != 0) {
*unparsed = path;
} else {
*unparsed = NULL;
}
break; /* while */
}
}
/* avoid returning an array with only one point (I'd say the exporter
* producing such is rather broken, but *our* bezier creation code
   * would crash on it).
*/
if (points->len < 2) {
g_array_set_size (points, 0);
}
if (current_point) {
*current_point = last_point;
}
return (points->len > 1);
}
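/* A minimal usage sketch (illustrative only, not part of the original file):
 * because the parser stops at each additional 'M'/'m', callers typically loop
 * until @unparsed comes back NULL, handing the remainder back in. Variable
 * names below are placeholders.
 *
 *   GArray *points = g_array_new (FALSE, FALSE, sizeof (BezPoint));
 *   gboolean closed = FALSE;
 *   Point current = { 0.0, 0.0 };
 *   char *unparsed = NULL;
 *   const char *rest = path_data;
 *
 *   do {
 *     g_array_set_size (points, 0);
 *     if (dia_svg_parse_path (points, rest, &unparsed, &closed, &current)) {
 *       // build one bezier/beziergon object from 'points' here
 *     }
 *     rest = unparsed;
 *   } while (rest != NULL);
 *
 *   g_array_free (points, TRUE);
 */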
static gboolean
_parse_transform (const char *trans, graphene_matrix_t *m, double scale)
{
char **list;
char *p = strchr (trans, '(');
int i = 0;
while ( (*trans != '\0')
&& (*trans == ' ' || *trans == ',' || *trans == '\t' ||
*trans == '\n' || *trans == '\r')) {
++trans; /* skip whitespace */
}
if (!p || !*trans) {
return FALSE; /* silently fail */
}
list = g_regex_split_simple ("[\\s,]+", p + 1, 0, 0);
if (strncmp (trans, "matrix", 6) == 0) {
float xx = 0, yx = 0, xy = 0, yy = 0, x0 = 0, y0 = 0;
if (list[i]) {
xx = g_ascii_strtod (list[i], NULL);
++i;
}
if (list[i]) {
yx = g_ascii_strtod (list[i], NULL);
++i;
}
if (list[i]) {
xy = g_ascii_strtod (list[i], NULL);
++i;
}
if (list[i]) {
yy = g_ascii_strtod (list[i], NULL);
++i;
}
if (list[i]) {
x0 = g_ascii_strtod (list[i], NULL);
++i;
}
if (list[i]) {
y0 = g_ascii_strtod (list[i], NULL);
++i;
}
graphene_matrix_init_from_2d (m, xx, yx, xy, yy, x0 / scale, y0 / scale);
} else if (strncmp (trans, "translate", 9) == 0) {
double x0 = 0, y0 = 0;
if (list[i]) {
x0 = g_ascii_strtod (list[i], NULL);
++i;
}
if (list[i]) {
y0 = g_ascii_strtod (list[i], NULL);
++i;
}
graphene_matrix_init_translate (m, &GRAPHENE_POINT3D_INIT (x0 / scale, y0 / scale, 0));
} else if (strncmp (trans, "scale", 5) == 0) {
double xx = 0, yy = 0;
if (list[i]) {
xx = g_ascii_strtod (list[i], NULL);
++i;
}
if (list[i]) {
yy = g_ascii_strtod (list[i], NULL);
++i;
} else {
yy = xx;
}
graphene_matrix_init_scale (m, xx, yy, 1.0);
} else if (strncmp (trans, "rotate", 6) == 0) {
double angle = 0;
double cx = 0, cy = 0;
if (list[i]) {
angle = g_ascii_strtod (list[i], NULL);
++i;
} else {
g_warning ("transform=rotate no angle?");
}
/* FIXME: check with real world data, I'm uncertain */
/* rotate around the given offset */
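    /* SVG defines rotate(angle, cx, cy) as
     * translate(cx, cy) rotate(angle) translate(-cx, -cy); the code below is
     * a sketch of that intent, starting from the translate(-cx,-cy) matrix
     * and then applying the rotation and the back-translation (see the
     * FIXME above). */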
if (list[i]) {
graphene_point3d_t point;
cx = g_ascii_strtod (list[i], NULL);
++i;
if (list[i]) {
cy = g_ascii_strtod (list[i], NULL);
++i;
} else {
cy = 0.0; /* if offsets don't come in pairs */
}
/* translate by -cx,-cy */
graphene_point3d_init (&point, -(cx / scale), -(cy / scale), 0);
graphene_matrix_init_translate (m, &point);
/* rotate by angle */
graphene_matrix_rotate_z (m, angle);
/* translate by cx,cy */
graphene_point3d_init (&point, cx / scale, cy / scale, 0);
graphene_matrix_translate (m, &point);
} else {
graphene_matrix_init_rotate (m, angle, graphene_vec3_z_axis ());
}
} else if (strncmp (trans, "skewX", 5) == 0) {
float skew = 0;
if (list[i]) {
skew = g_ascii_strtod (list[i], NULL);
}
graphene_matrix_init_skew (m, DIA_RADIANS (skew), 0);
} else if (strncmp (trans, "skewY", 5) == 0) {
float skew = 0;
if (list[i]) {
skew = g_ascii_strtod (list[i], NULL);
}
graphene_matrix_init_skew (m, 0, DIA_RADIANS (skew));
} else {
g_warning ("SVG: %s?", trans);
return FALSE;
}
g_clear_pointer (&list, g_strfreev);
return TRUE;
}
graphene_matrix_t *
dia_svg_parse_transform (const char *trans, double scale)
{
graphene_matrix_t *m = NULL;
char **transforms = g_regex_split_simple ("\\)", trans, 0, 0);
int i = 0;
/* go through the list of transformations - not that one would be enough ;) */
while (transforms[i]) {
graphene_matrix_t mat;
if (_parse_transform (transforms[i], &mat, scale)) {
if (!m) {
m = graphene_matrix_alloc ();
graphene_matrix_init_from_matrix (m, &mat);
} else {
graphene_matrix_multiply (m, &mat, m);
}
}
++i;
}
g_clear_pointer (&transforms, g_strfreev);
return m;
}
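/* Usage sketch (illustrative only, not part of the original file):
 *
 *   graphene_matrix_t *m = dia_svg_parse_transform ("translate(10,20) scale(2)", 1.0);
 *   if (m) {
 *     char *s = dia_svg_from_matrix (m, 1.0);   // back to "matrix(...)" form
 *     g_free (s);
 *     graphene_matrix_free (m);
 *   }
 */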
char *
dia_svg_from_matrix (const graphene_matrix_t *matrix, double scale)
{
/* transform="matrix(1,0,0,1,0,0)" */
char buf[G_ASCII_DTOSTR_BUF_SIZE];
GString *sm = g_string_new ("matrix(");
char *s;
g_ascii_formatd (buf,
sizeof (buf),
"%g",
graphene_matrix_get_value (matrix, 0, 0));
g_string_append (sm, buf);
g_string_append (sm, ",");
g_ascii_formatd (buf,
sizeof (buf),
"%g",
graphene_matrix_get_value (matrix, 0, 1));
g_string_append (sm, buf);
g_string_append (sm, ",");
g_ascii_formatd (buf,
sizeof (buf),
"%g",
graphene_matrix_get_value (matrix, 1, 0));
g_string_append (sm, buf);
g_string_append (sm, ",");
g_ascii_formatd (buf,
sizeof (buf),
"%g",
graphene_matrix_get_value (matrix, 1, 1));
g_string_append (sm, buf);
g_string_append (sm, ",");
g_ascii_formatd (buf,
sizeof (buf),
"%g",
graphene_matrix_get_x_translation (matrix) * scale);
g_string_append (sm, buf);
g_string_append (sm, ",");
g_ascii_formatd (buf,
sizeof (buf),
"%g",
graphene_matrix_get_y_translation (matrix) * scale);
g_string_append (sm, buf);
g_string_append (sm, ")");
s = sm->str;
g_string_free (sm, FALSE);
return s;
}
| GNOME/dia | lib/dia_svg.c | C | gpl-2.0 | 53,495 |
/* This program was written with lots of love under the GPL by Jonathan
* Blandford <jrb@gnome.org>
*/
#include <config.h>
#include <stdlib.h>
#include <string.h>
#include <gtk/gtk.h>
#include <gio/gio.h>
#include <gdk/gdkx.h>
#include <X11/Xatom.h>
#include <glib/gi18n.h>
#include <gdk/gdkkeysyms.h>
#if GTK_CHECK_VERSION (3, 0, 0)
#include <gdk/gdkkeysyms-compat.h>
#endif
#include "wm-common.h"
#include "capplet-util.h"
#include "eggcellrendererkeys.h"
#include "activate-settings-daemon.h"
#include "dconf-util.h"
#define GSETTINGS_KEYBINDINGS_DIR "/org/mate/desktop/keybindings/"
#define CUSTOM_KEYBINDING_SCHEMA "org.mate.control-center.keybinding"
#define MAX_ELEMENTS_BEFORE_SCROLLING 10
#define MAX_CUSTOM_SHORTCUTS 1000
#define RESPONSE_ADD 0
#define RESPONSE_REMOVE 1
typedef struct {
/* The untranslated name, combine with ->package to translate */
char *name;
/* The gettext package to use to translate the section title */
char *package;
/* Name of the window manager the keys would apply to */
char *wm_name;
/* The GSettings schema for the whole file */
char *schema;
/* an array of KeyListEntry */
GArray *entries;
} KeyList;
typedef enum {
COMPARISON_NONE = 0,
COMPARISON_GT,
COMPARISON_LT,
COMPARISON_EQ
} Comparison;
typedef struct
{
char *gsettings_path;
char *schema;
char *name;
int value;
char *value_schema; /* gsettings schema for key/value */
char *value_key;
char *description;
char *description_key;
char *cmd_key;
Comparison comparison;
} KeyListEntry;
enum
{
DESCRIPTION_COLUMN,
KEYENTRY_COLUMN,
N_COLUMNS
};
typedef struct
{
GSettings *settings;
char *gsettings_path;
char *gsettings_key;
guint keyval;
guint keycode;
EggVirtualModifierType mask;
gboolean editable;
GtkTreeModel *model;
char *description;
char *desc_gsettings_key;
gboolean desc_editable;
char *command;
char *cmd_gsettings_key;
gboolean cmd_editable;
gulong gsettings_cnxn;
gulong gsettings_cnxn_desc;
gulong gsettings_cnxn_cmd;
} KeyEntry;
static gboolean block_accels = FALSE;
static GtkWidget *custom_shortcut_dialog = NULL;
static GtkWidget *custom_shortcut_name_entry = NULL;
static GtkWidget *custom_shortcut_command_entry = NULL;
static GtkWidget* _gtk_builder_get_widget(GtkBuilder* builder, const gchar* name)
{
return GTK_WIDGET (gtk_builder_get_object (builder, name));
}
static GtkBuilder *
create_builder (void)
{
GtkBuilder *builder = gtk_builder_new();
GError *error = NULL;
static const gchar *uifile = MATECC_UI_DIR "/mate-keybinding-properties.ui";
if (gtk_builder_add_from_file (builder, uifile, &error) == 0) {
g_warning ("Could not load UI: %s", error->message);
g_error_free (error);
g_object_unref (builder);
builder = NULL;
}
return builder;
}
static char* binding_name(guint keyval, guint keycode, EggVirtualModifierType mask, gboolean translate)
{
if (keyval != 0 || keycode != 0)
{
if (translate)
{
return egg_virtual_accelerator_label (keyval, keycode, mask);
}
else
{
return egg_virtual_accelerator_name (keyval, keycode, mask);
}
}
else
{
return g_strdup (translate ? _("Disabled") : "");
}
}
static gboolean
binding_from_string (const char *str,
guint *accelerator_key,
guint *keycode,
EggVirtualModifierType *accelerator_mods)
{
g_return_val_if_fail (accelerator_key != NULL, FALSE);
if (str == NULL || strcmp (str, "disabled") == 0)
{
*accelerator_key = 0;
*keycode = 0;
*accelerator_mods = 0;
return TRUE;
}
egg_accelerator_parse_virtual (str, accelerator_key, keycode, accelerator_mods);
if (*accelerator_key == 0)
return FALSE;
else
return TRUE;
}
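/* Example (illustrative only): a stored binding such as "<Control><Alt>t"
 * is parsed into keyval/keycode/mask here, while the special value
 * "disabled" (or a missing string) means no binding. binding_name() above
 * goes the other way, producing either a translated label for display or
 * the accelerator name that gets written back to GSettings. */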
static void
accel_set_func (GtkTreeViewColumn *tree_column,
GtkCellRenderer *cell,
GtkTreeModel *model,
GtkTreeIter *iter,
gpointer data)
{
KeyEntry *key_entry;
gtk_tree_model_get (model, iter,
KEYENTRY_COLUMN, &key_entry,
-1);
if (key_entry == NULL)
g_object_set (cell,
"visible", FALSE,
NULL);
else if (! key_entry->editable)
g_object_set (cell,
"visible", TRUE,
"editable", FALSE,
"accel_key", key_entry->keyval,
"accel_mask", key_entry->mask,
"keycode", key_entry->keycode,
"style", PANGO_STYLE_ITALIC,
NULL);
else
g_object_set (cell,
"visible", TRUE,
"editable", TRUE,
"accel_key", key_entry->keyval,
"accel_mask", key_entry->mask,
"keycode", key_entry->keycode,
"style", PANGO_STYLE_NORMAL,
NULL);
}
static void
description_set_func (GtkTreeViewColumn *tree_column,
GtkCellRenderer *cell,
GtkTreeModel *model,
GtkTreeIter *iter,
gpointer data)
{
KeyEntry *key_entry;
gtk_tree_model_get (model, iter,
KEYENTRY_COLUMN, &key_entry,
-1);
if (key_entry != NULL)
g_object_set (cell,
"editable", FALSE,
"text", key_entry->description != NULL ?
key_entry->description : _("<Unknown Action>"),
NULL);
else
g_object_set (cell,
"editable", FALSE, NULL);
}
static gboolean
keybinding_key_changed_foreach (GtkTreeModel *model,
GtkTreePath *path,
GtkTreeIter *iter,
gpointer user_data)
{
KeyEntry *key_entry;
KeyEntry *tmp_key_entry;
key_entry = (KeyEntry *)user_data;
gtk_tree_model_get (key_entry->model, iter,
KEYENTRY_COLUMN, &tmp_key_entry,
-1);
if (key_entry == tmp_key_entry)
{
gtk_tree_model_row_changed (key_entry->model, path, iter);
return TRUE;
}
return FALSE;
}
static void
keybinding_key_changed (GSettings *settings,
gchar *key,
KeyEntry *key_entry)
{
gchar *key_value;
key_value = g_settings_get_string (settings, key);
binding_from_string (key_value, &key_entry->keyval, &key_entry->keycode, &key_entry->mask);
key_entry->editable = g_settings_is_writable (settings, key);
/* update the model */
gtk_tree_model_foreach (key_entry->model, keybinding_key_changed_foreach, key_entry);
}
static void
keybinding_description_changed (GSettings *settings,
gchar *key,
KeyEntry *key_entry)
{
gchar *key_value;
key_value = g_settings_get_string (settings, key);
g_free (key_entry->description);
key_entry->description = key_value ? g_strdup (key_value) : NULL;
g_free (key_value);
key_entry->desc_editable = g_settings_is_writable (settings, key);
/* update the model */
gtk_tree_model_foreach (key_entry->model, keybinding_key_changed_foreach, key_entry);
}
static void
keybinding_command_changed (GSettings *settings,
gchar *key,
KeyEntry *key_entry)
{
gchar *key_value;
key_value = g_settings_get_string (settings, key);
g_free (key_entry->command);
key_entry->command = key_value ? g_strdup (key_value) : NULL;
key_entry->cmd_editable = g_settings_is_writable (settings, key);
g_free (key_value);
/* update the model */
gtk_tree_model_foreach (key_entry->model, keybinding_key_changed_foreach, key_entry);
}
static int
keyentry_sort_func (GtkTreeModel *model,
GtkTreeIter *a,
GtkTreeIter *b,
gpointer user_data)
{
KeyEntry *key_entry_a;
KeyEntry *key_entry_b;
int retval;
key_entry_a = NULL;
gtk_tree_model_get (model, a,
KEYENTRY_COLUMN, &key_entry_a,
-1);
key_entry_b = NULL;
gtk_tree_model_get (model, b,
KEYENTRY_COLUMN, &key_entry_b,
-1);
if (key_entry_a && key_entry_b)
{
if ((key_entry_a->keyval || key_entry_a->keycode) &&
(key_entry_b->keyval || key_entry_b->keycode))
{
gchar *name_a, *name_b;
name_a = binding_name (key_entry_a->keyval,
key_entry_a->keycode,
key_entry_a->mask,
TRUE);
name_b = binding_name (key_entry_b->keyval,
key_entry_b->keycode,
key_entry_b->mask,
TRUE);
retval = g_utf8_collate (name_a, name_b);
g_free (name_a);
g_free (name_b);
}
else if (key_entry_a->keyval || key_entry_a->keycode)
retval = -1;
else if (key_entry_b->keyval || key_entry_b->keycode)
retval = 1;
else
retval = 0;
}
else if (key_entry_a)
retval = -1;
else if (key_entry_b)
retval = 1;
else
retval = 0;
return retval;
}
static void
clear_old_model (GtkBuilder *builder)
{
GtkWidget *tree_view;
GtkWidget *actions_swindow;
GtkTreeModel *model;
tree_view = _gtk_builder_get_widget (builder, "shortcut_treeview");
model = gtk_tree_view_get_model (GTK_TREE_VIEW (tree_view));
if (model == NULL)
{
/* create a new model */
model = (GtkTreeModel *) gtk_tree_store_new (N_COLUMNS, G_TYPE_STRING, G_TYPE_POINTER);
gtk_tree_sortable_set_sort_func (GTK_TREE_SORTABLE (model),
KEYENTRY_COLUMN,
keyentry_sort_func,
NULL, NULL);
gtk_tree_view_set_model (GTK_TREE_VIEW (tree_view), model);
g_object_unref (model);
}
else
{
/* clear the existing model */
gboolean valid;
GtkTreeIter iter;
KeyEntry *key_entry;
for (valid = gtk_tree_model_get_iter_first (model, &iter);
valid;
valid = gtk_tree_model_iter_next (model, &iter))
{
gtk_tree_model_get (model, &iter,
KEYENTRY_COLUMN, &key_entry,
-1);
if (key_entry != NULL)
{
g_signal_handler_disconnect (key_entry->settings, key_entry->gsettings_cnxn);
if (key_entry->gsettings_cnxn_desc != 0)
g_signal_handler_disconnect (key_entry->settings, key_entry->gsettings_cnxn_desc);
if (key_entry->gsettings_cnxn_cmd != 0)
g_signal_handler_disconnect (key_entry->settings, key_entry->gsettings_cnxn_cmd);
g_object_unref (key_entry->settings);
if (key_entry->gsettings_path)
g_free (key_entry->gsettings_path);
g_free (key_entry->gsettings_key);
g_free (key_entry->description);
g_free (key_entry->desc_gsettings_key);
g_free (key_entry->command);
g_free (key_entry->cmd_gsettings_key);
g_free (key_entry);
}
}
gtk_tree_store_clear (GTK_TREE_STORE (model));
}
actions_swindow = _gtk_builder_get_widget (builder, "actions_swindow");
gtk_scrolled_window_set_policy (GTK_SCROLLED_WINDOW (actions_swindow),
GTK_POLICY_NEVER, GTK_POLICY_NEVER);
gtk_widget_set_size_request (actions_swindow, -1, -1);
}
typedef struct {
const char *key;
const char *path;
const char *schema;
gboolean found;
} KeyMatchData;
static gboolean key_match(GtkTreeModel* model, GtkTreePath* path, GtkTreeIter* iter, gpointer data)
{
KeyMatchData* match_data = data;
KeyEntry* element = NULL;
gchar *element_schema = NULL;
gchar *element_path = NULL;
gtk_tree_model_get(model, iter,
KEYENTRY_COLUMN, &element,
-1);
if (element && element->settings && G_IS_SETTINGS(element->settings))
{
g_object_get (element->settings, "schema-id", &element_schema, NULL);
g_object_get (element->settings, "path", &element_path, NULL);
}
if (element && g_strcmp0(element->gsettings_key, match_data->key) == 0
&& g_strcmp0(element_schema, match_data->schema) == 0
&& g_strcmp0(element_path, match_data->path) == 0)
{
match_data->found = TRUE;
return TRUE;
}
return FALSE;
}
static gboolean key_is_already_shown(GtkTreeModel* model, const KeyListEntry* entry)
{
KeyMatchData data;
data.key = entry->name;
data.schema = entry->schema;
data.path = entry->gsettings_path;
data.found = FALSE;
gtk_tree_model_foreach(model, key_match, &data);
return data.found;
}
static gboolean should_show_key(const KeyListEntry* entry)
{
GSettings *settings;
int value;
if (entry->comparison == COMPARISON_NONE)
{
return TRUE;
}
g_return_val_if_fail(entry->value_key != NULL, FALSE);
g_return_val_if_fail(entry->value_schema != NULL, FALSE);
settings = g_settings_new (entry->value_schema);
value = g_settings_get_int (settings, entry->value_key);
g_object_unref (settings);
switch (entry->comparison)
{
case COMPARISON_NONE:
/* For compiler warnings */
g_assert_not_reached ();
return FALSE;
case COMPARISON_GT:
if (value > entry->value)
{
return TRUE;
}
break;
case COMPARISON_LT:
if (value < entry->value)
{
return TRUE;
}
break;
case COMPARISON_EQ:
if (value == entry->value)
{
return TRUE;
}
break;
}
return FALSE;
}
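/* Example (illustrative only): a KeyListEntry declared with
 * comparison="gt", value="1" and key pointing at an integer setting such as
 * the workspace count is only listed while that setting is greater than 1;
 * this is how shortcuts for currently unavailable features are hidden. */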
static gboolean
count_rows_foreach (GtkTreeModel *model, GtkTreePath *path, GtkTreeIter *iter, gpointer data)
{
gint *rows = data;
(*rows)++;
return FALSE;
}
static void
ensure_scrollbar (GtkBuilder *builder, int i)
{
if (i == MAX_ELEMENTS_BEFORE_SCROLLING)
{
GtkRequisition rectangle;
GObject *actions_swindow = gtk_builder_get_object (builder,
"actions_swindow");
GtkWidget *treeview = _gtk_builder_get_widget (builder,
"shortcut_treeview");
gtk_widget_ensure_style (treeview);
gtk_widget_size_request (treeview, &rectangle);
gtk_widget_set_size_request (treeview, -1, rectangle.height);
gtk_scrolled_window_set_policy (GTK_SCROLLED_WINDOW (actions_swindow),
GTK_POLICY_NEVER, GTK_POLICY_AUTOMATIC);
}
}
static void
find_section (GtkTreeModel *model,
GtkTreeIter *iter,
const char *title)
{
gboolean success;
success = gtk_tree_model_get_iter_first (model, iter);
while (success)
{
char *description = NULL;
gtk_tree_model_get (model, iter,
DESCRIPTION_COLUMN, &description,
-1);
if (g_strcmp0 (description, title) == 0)
return;
success = gtk_tree_model_iter_next (model, iter);
}
gtk_tree_store_append (GTK_TREE_STORE (model), iter, NULL);
gtk_tree_store_set (GTK_TREE_STORE (model), iter,
DESCRIPTION_COLUMN, title,
-1);
}
static void
append_keys_to_tree (GtkBuilder *builder,
const gchar *title,
const gchar *schema,
const gchar *package,
const KeyListEntry *keys_list)
{
GtkTreeIter parent_iter, iter;
GtkTreeModel *model;
gint i, j;
model = gtk_tree_view_get_model (GTK_TREE_VIEW (gtk_builder_get_object (builder, "shortcut_treeview")));
/* Try to find a section parent iter, if it already exists */
find_section (model, &iter, title);
parent_iter = iter;
i = 0;
gtk_tree_model_foreach (model, count_rows_foreach, &i);
  /* If the header we just added is the MAX_ELEMENTS_BEFORE_SCROLLING-th row,
   * then we need to enable scrolling now */
ensure_scrollbar (builder, i - 1);
for (j = 0; keys_list[j].name != NULL; j++)
{
GSettings *settings = NULL;
gchar *settings_path;
KeyEntry *key_entry;
const gchar *key_string;
gchar *key_value;
gchar *description;
gchar *command;
if (!should_show_key (&keys_list[j]))
continue;
if (key_is_already_shown (model, &keys_list[j]))
continue;
key_string = keys_list[j].name;
if (keys_list[j].gsettings_path != NULL)
{
settings = g_settings_new_with_path (schema, keys_list[j].gsettings_path);
settings_path = g_strdup(keys_list[j].gsettings_path);
}
else
{
settings = g_settings_new (schema);
settings_path = NULL;
}
if (keys_list[j].description_key != NULL)
{
/* it's a custom shortcut, so description is in gsettings */
description = g_settings_get_string (settings, keys_list[j].description_key);
}
else
{
          /* it's from a keyfile, so the description needs to be translated */
description = keys_list[j].description;
if (package)
{
bind_textdomain_codeset (package, "UTF-8");
description = dgettext (package, description);
}
else
{
description = _(description);
}
}
if (description == NULL)
{
/* Only print a warning for keys that should have a schema */
if (keys_list[j].description_key == NULL)
g_warning ("No description for key '%s'", key_string);
}
if (keys_list[j].cmd_key != NULL)
{
command = g_settings_get_string (settings, keys_list[j].cmd_key);
}
else
{
command = NULL;
}
key_entry = g_new0 (KeyEntry, 1);
key_entry->settings = settings;
key_entry->gsettings_path = settings_path;
key_entry->gsettings_key = g_strdup (key_string);
key_entry->editable = g_settings_is_writable (settings, key_string);
key_entry->model = model;
key_entry->description = description;
key_entry->command = command;
if (keys_list[j].description_key != NULL)
{
key_entry->desc_gsettings_key = g_strdup (keys_list[j].description_key);
key_entry->desc_editable = g_settings_is_writable (settings, key_entry->desc_gsettings_key);
gchar *gsettings_signal = g_strconcat ("changed::", key_entry->desc_gsettings_key, NULL);
key_entry->gsettings_cnxn_desc = g_signal_connect (settings,
gsettings_signal,
G_CALLBACK (keybinding_description_changed),
key_entry);
g_free (gsettings_signal);
}
if (keys_list[j].cmd_key != NULL)
{
key_entry->cmd_gsettings_key = g_strdup (keys_list[j].cmd_key);
key_entry->cmd_editable = g_settings_is_writable (settings, key_entry->cmd_gsettings_key);
gchar *gsettings_signal = g_strconcat ("changed::", key_entry->cmd_gsettings_key, NULL);
key_entry->gsettings_cnxn_cmd = g_signal_connect (settings,
gsettings_signal,
G_CALLBACK (keybinding_command_changed),
key_entry);
g_free (gsettings_signal);
}
gchar *gsettings_signal = g_strconcat ("changed::", key_string, NULL);
key_entry->gsettings_cnxn = g_signal_connect (settings,
gsettings_signal,
G_CALLBACK (keybinding_key_changed),
key_entry);
g_free (gsettings_signal);
key_value = g_settings_get_string (settings, key_string);
binding_from_string (key_value, &key_entry->keyval, &key_entry->keycode, &key_entry->mask);
g_free (key_value);
ensure_scrollbar (builder, i);
++i;
gtk_tree_store_append (GTK_TREE_STORE (model), &iter, &parent_iter);
/* we use the DESCRIPTION_COLUMN only for the section headers */
gtk_tree_store_set (GTK_TREE_STORE (model), &iter,
KEYENTRY_COLUMN, key_entry,
-1);
gtk_tree_view_expand_all (GTK_TREE_VIEW (gtk_builder_get_object (builder, "shortcut_treeview")));
}
/* Don't show an empty section */
if (gtk_tree_model_iter_n_children (model, &parent_iter) == 0)
gtk_tree_store_remove (GTK_TREE_STORE (model), &parent_iter);
if (i == 0)
gtk_widget_hide (_gtk_builder_get_widget (builder, "shortcuts_vbox"));
else
gtk_widget_show (_gtk_builder_get_widget (builder, "shortcuts_vbox"));
}
static void
parse_start_tag (GMarkupParseContext *ctx,
const gchar *element_name,
const gchar **attr_names,
const gchar **attr_values,
gpointer user_data,
GError **error)
{
KeyList *keylist = (KeyList *) user_data;
KeyListEntry key;
const char *name, *value_key, *description, *value_schema;
int value;
Comparison comparison;
const char *schema = NULL;
name = NULL;
/* The top-level element, names the section in the tree */
if (g_str_equal (element_name, "KeyListEntries"))
{
const char *wm_name = NULL;
const char *package = NULL;
while (*attr_names && *attr_values)
{
if (g_str_equal (*attr_names, "name"))
{
if (**attr_values)
name = *attr_values;
}
else if (g_str_equal (*attr_names, "wm_name"))
{
if (**attr_values)
wm_name = *attr_values;
}
else if (g_str_equal (*attr_names, "package"))
{
if (**attr_values)
package = *attr_values;
}
else if (g_str_equal (*attr_names, "schema"))
{
if (**attr_values)
schema = *attr_values;
}
++attr_names;
++attr_values;
}
if (name)
{
if (keylist->name)
g_warning ("Duplicate section name");
g_free (keylist->name);
keylist->name = g_strdup (name);
}
if (wm_name)
{
if (keylist->wm_name)
g_warning ("Duplicate window manager name");
g_free (keylist->wm_name);
keylist->wm_name = g_strdup (wm_name);
}
if (package)
{
if (keylist->package)
g_warning ("Duplicate gettext package name");
g_free (keylist->package);
keylist->package = g_strdup (package);
}
if (schema)
{
if (keylist->schema)
g_warning ("Duplicate schema name");
g_free (keylist->schema);
keylist->schema = g_strdup (schema);
}
return;
}
if (!g_str_equal (element_name, "KeyListEntry")
|| attr_names == NULL
|| attr_values == NULL)
return;
value = 0;
comparison = COMPARISON_NONE;
value_key = NULL;
value_schema = NULL;
description = NULL;
while (*attr_names && *attr_values)
{
if (g_str_equal (*attr_names, "name"))
{
/* skip if empty */
if (**attr_values)
name = *attr_values;
} else if (g_str_equal (*attr_names, "value")) {
if (**attr_values) {
value = (int) g_ascii_strtoull (*attr_values, NULL, 0);
}
} else if (g_str_equal (*attr_names, "key")) {
if (**attr_values) {
value_key = *attr_values;
}
} else if (g_str_equal (*attr_names, "comparison")) {
if (**attr_values) {
if (g_str_equal (*attr_values, "gt")) {
comparison = COMPARISON_GT;
} else if (g_str_equal (*attr_values, "lt")) {
comparison = COMPARISON_LT;
} else if (g_str_equal (*attr_values, "eq")) {
comparison = COMPARISON_EQ;
}
}
} else if (g_str_equal (*attr_names, "description")) {
if (**attr_values) {
description = *attr_values;
}
} else if (g_str_equal (*attr_names, "schema")) {
if (**attr_values) {
value_schema = *attr_values;
}
}
++attr_names;
++attr_values;
}
if (name == NULL)
return;
key.name = g_strdup (name);
key.gsettings_path = NULL;
key.description_key = NULL;
key.description = g_strdup(description);
key.schema = g_strdup(schema);
key.value = value;
if (value_key) {
key.value_key = g_strdup (value_key);
key.value_schema = g_strdup (value_schema);
}
else {
key.value_key = NULL;
key.value_schema = NULL;
}
key.comparison = comparison;
key.cmd_key = NULL;
g_array_append_val (keylist->entries, key);
}
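/* The XML files handled by the parser above look roughly like the sketch
 * below (an illustration derived from the attributes parsed here, not a
 * verbatim copy of any shipped file; the schema names are placeholders):
 *
 *   <KeyListEntries name="Section title" schema="org.example.keybindings">
 *     <KeyListEntry name="binding-key" description="Human readable action"/>
 *     <KeyListEntry name="other-key" key="some-int-key"
 *                   schema="org.example.settings" comparison="gt" value="1"/>
 *   </KeyListEntries>
 */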
static gboolean
strv_contains (char **strv,
char *str)
{
char **p = strv;
for (p = strv; *p; p++)
if (strcmp (*p, str) == 0)
return TRUE;
return FALSE;
}
static void
append_keys_to_tree_from_file (GtkBuilder *builder,
const char *filename,
char **wm_keybindings)
{
GMarkupParseContext *ctx;
GMarkupParser parser = { parse_start_tag, NULL, NULL, NULL, NULL };
KeyList *keylist;
KeyListEntry key, *keys;
GError *err = NULL;
char *buf;
const char *title;
gsize buf_len;
guint i;
if (!g_file_get_contents (filename, &buf, &buf_len, &err))
return;
keylist = g_new0 (KeyList, 1);
keylist->entries = g_array_new (FALSE, TRUE, sizeof (KeyListEntry));
ctx = g_markup_parse_context_new (&parser, 0, keylist, NULL);
if (!g_markup_parse_context_parse (ctx, buf, buf_len, &err))
{
g_warning ("Failed to parse '%s': '%s'", filename, err->message);
g_error_free (err);
g_free (keylist->name);
g_free (keylist->package);
g_free (keylist->wm_name);
g_free (keylist->schema);
for (i = 0; i < keylist->entries->len; i++)
g_free (((KeyListEntry *) &(keylist->entries->data[i]))->name);
g_array_free (keylist->entries, TRUE);
g_free (keylist);
keylist = NULL;
}
g_markup_parse_context_free (ctx);
g_free (buf);
if (keylist == NULL)
return;
  /* If there are no keys to add, or the settings apply to a window manager
* that's not the one we're running */
if (keylist->entries->len == 0
|| (keylist->wm_name != NULL && !strv_contains (wm_keybindings, keylist->wm_name))
|| keylist->name == NULL)
{
g_free (keylist->name);
g_free (keylist->package);
g_free (keylist->wm_name);
g_free (keylist->schema);
g_array_free (keylist->entries, TRUE);
g_free (keylist);
return;
}
/* Empty KeyListEntry to end the array */
key.name = NULL;
key.description_key = NULL;
key.value_key = NULL;
key.value = 0;
key.comparison = COMPARISON_NONE;
g_array_append_val (keylist->entries, key);
keys = (KeyListEntry *) g_array_free (keylist->entries, FALSE);
if (keylist->package)
{
bind_textdomain_codeset (keylist->package, "UTF-8");
title = dgettext (keylist->package, keylist->name);
} else {
title = _(keylist->name);
}
append_keys_to_tree (builder, title, keylist->schema, keylist->package, keys);
g_free (keylist->name);
g_free (keylist->package);
for (i = 0; keys[i].name != NULL; i++)
g_free (keys[i].name);
g_free (keylist);
}
static void
append_keys_to_tree_from_gsettings (GtkBuilder *builder, const gchar *gsettings_path)
{
gchar **custom_list;
GArray *entries;
KeyListEntry key;
gint i;
/* load custom shortcuts from GSettings */
entries = g_array_new (FALSE, TRUE, sizeof (KeyListEntry));
key.value_key = NULL;
key.value = 0;
key.comparison = COMPARISON_NONE;
custom_list = dconf_util_list_subdirs (gsettings_path, FALSE);
if (custom_list != NULL)
{
for (i = 0; custom_list[i] != NULL; i++)
{
key.gsettings_path = g_strdup_printf("%s%s", gsettings_path, custom_list[i]);
key.name = g_strdup("binding");
key.cmd_key = g_strdup("action");
key.description_key = g_strdup("name");
key.schema = NULL;
g_array_append_val (entries, key);
}
}
g_strfreev (custom_list);
if (entries->len > 0)
{
KeyListEntry *keys;
int i;
/* Empty KeyListEntry to end the array */
key.gsettings_path = NULL;
key.name = NULL;
key.description_key = NULL;
key.cmd_key = NULL;
g_array_append_val (entries, key);
keys = (KeyListEntry *) entries->data;
append_keys_to_tree (builder, _("Custom Shortcuts"), CUSTOM_KEYBINDING_SCHEMA, NULL, keys);
for (i = 0; i < entries->len; ++i)
{
g_free (keys[i].name);
g_free (keys[i].description_key);
g_free (keys[i].cmd_key);
g_free (keys[i].gsettings_path);
}
}
g_array_free (entries, TRUE);
}
static void
reload_key_entries (GtkBuilder *builder)
{
gchar **wm_keybindings;
GDir *dir;
const char *name;
GList *list, *l;
wm_keybindings = wm_common_get_current_keybindings();
clear_old_model (builder);
dir = g_dir_open (MATECC_KEYBINDINGS_DIR, 0, NULL);
if (!dir)
return;
list = NULL;
for (name = g_dir_read_name (dir) ; name ; name = g_dir_read_name (dir))
{
if (g_str_has_suffix (name, ".xml"))
{
list = g_list_insert_sorted (list, g_strdup (name),
(GCompareFunc) g_ascii_strcasecmp);
}
}
g_dir_close (dir);
for (l = list; l != NULL; l = l->next)
{
gchar *path;
path = g_build_filename (MATECC_KEYBINDINGS_DIR, l->data, NULL);
append_keys_to_tree_from_file (builder, path, wm_keybindings);
g_free (l->data);
g_free (path);
}
g_list_free (list);
/* Load custom shortcuts _after_ system-provided ones,
* since some of the custom shortcuts may also be listed
   * in a file. Loading the custom shortcuts last keeps
   * such keys from showing up in the custom section.
*/
append_keys_to_tree_from_gsettings (builder, GSETTINGS_KEYBINDINGS_DIR);
g_strfreev (wm_keybindings);
}
static void
key_entry_controlling_key_changed (GSettings *settings, gchar *key, gpointer user_data)
{
reload_key_entries (user_data);
}
static gboolean cb_check_for_uniqueness(GtkTreeModel* model, GtkTreePath* path, GtkTreeIter* iter, KeyEntry* new_key)
{
KeyEntry* element;
gtk_tree_model_get (new_key->model, iter,
KEYENTRY_COLUMN, &element,
-1);
  /* no conflict for: blanks, different modifiers, or ourselves */
if (element == NULL || new_key->mask != element->mask)
{
return FALSE;
}
gchar *new_key_schema = NULL;
gchar *element_schema = NULL;
gchar *new_key_path = NULL;
gchar *element_path = NULL;
if (new_key && new_key->settings)
{
g_object_get (new_key->settings, "schema-id", &new_key_schema, NULL);
g_object_get (new_key->settings, "path", &new_key_path, NULL);
}
if (element->settings)
{
g_object_get (element->settings, "schema-id", &element_schema, NULL);
g_object_get (element->settings, "path", &element_path, NULL);
}
if (!g_strcmp0 (new_key->gsettings_key, element->gsettings_key) &&
!g_strcmp0 (new_key_schema, element_schema) &&
!g_strcmp0 (new_key_path, element_path))
{
return FALSE;
}
if (new_key->keyval != 0)
{
if (new_key->keyval != element->keyval)
{
return FALSE;
}
}
else if (element->keyval != 0 || new_key->keycode != element->keycode)
{
return FALSE;
}
new_key->editable = FALSE;
new_key->settings = element->settings;
new_key->gsettings_key = element->gsettings_key;
new_key->description = element->description;
new_key->desc_gsettings_key = element->desc_gsettings_key;
new_key->desc_editable = element->desc_editable;
return TRUE;
}
static const guint forbidden_keyvals[] = {
/* Navigation keys */
GDK_Home,
GDK_Left,
GDK_Up,
GDK_Right,
GDK_Down,
GDK_Page_Up,
GDK_Page_Down,
GDK_End,
GDK_Tab,
/* Return */
GDK_KP_Enter,
GDK_Return,
GDK_space,
GDK_Mode_switch
};
static gboolean keyval_is_forbidden(guint keyval)
{
guint i;
for (i = 0; i < G_N_ELEMENTS(forbidden_keyvals); i++)
{
if (keyval == forbidden_keyvals[i])
{
return TRUE;
}
}
return FALSE;
}
static void show_error(GtkWindow* parent, GError* err)
{
GtkWidget *dialog;
dialog = gtk_message_dialog_new (parent,
GTK_DIALOG_DESTROY_WITH_PARENT | GTK_DIALOG_MODAL,
GTK_MESSAGE_WARNING,
GTK_BUTTONS_OK,
_("Error saving the new shortcut"));
gtk_message_dialog_format_secondary_text (GTK_MESSAGE_DIALOG (dialog),
"%s", err->message);
gtk_dialog_run (GTK_DIALOG (dialog));
gtk_widget_destroy (dialog);
}
static void accel_edited_callback(GtkCellRendererText* cell, const char* path_string, guint keyval, EggVirtualModifierType mask, guint keycode, gpointer data)
{
GtkTreeView* view = (GtkTreeView*) data;
GtkTreeModel* model;
GtkTreePath* path = gtk_tree_path_new_from_string (path_string);
GtkTreeIter iter;
KeyEntry* key_entry, tmp_key;
char* str;
block_accels = FALSE;
model = gtk_tree_view_get_model (view);
gtk_tree_model_get_iter (model, &iter, path);
gtk_tree_path_free (path);
gtk_tree_model_get (model, &iter,
KEYENTRY_COLUMN, &key_entry,
-1);
/* sanity check */
if (key_entry == NULL)
{
return;
}
/* CapsLock isn't supported as a keybinding modifier, so keep it from confusing us */
mask &= ~EGG_VIRTUAL_LOCK_MASK;
tmp_key.model = model;
tmp_key.keyval = keyval;
tmp_key.keycode = keycode;
tmp_key.mask = mask;
tmp_key.settings = key_entry->settings;
tmp_key.gsettings_key = key_entry->gsettings_key;
tmp_key.description = NULL;
tmp_key.editable = TRUE; /* kludge to stuff in a return flag */
if (keyval != 0 || keycode != 0) /* any number of keys can be disabled */
{
gtk_tree_model_foreach(model, (GtkTreeModelForeachFunc) cb_check_for_uniqueness, &tmp_key);
}
/* Check for unmodified keys */
if (tmp_key.mask == 0 && tmp_key.keycode != 0)
{
if ((tmp_key.keyval >= GDK_a && tmp_key.keyval <= GDK_z)
|| (tmp_key.keyval >= GDK_A && tmp_key.keyval <= GDK_Z)
|| (tmp_key.keyval >= GDK_0 && tmp_key.keyval <= GDK_9)
|| (tmp_key.keyval >= GDK_kana_fullstop && tmp_key.keyval <= GDK_semivoicedsound)
|| (tmp_key.keyval >= GDK_Arabic_comma && tmp_key.keyval <= GDK_Arabic_sukun)
|| (tmp_key.keyval >= GDK_Serbian_dje && tmp_key.keyval <= GDK_Cyrillic_HARDSIGN)
|| (tmp_key.keyval >= GDK_Greek_ALPHAaccent && tmp_key.keyval <= GDK_Greek_omega)
|| (tmp_key.keyval >= GDK_hebrew_doublelowline && tmp_key.keyval <= GDK_hebrew_taf)
|| (tmp_key.keyval >= GDK_Thai_kokai && tmp_key.keyval <= GDK_Thai_lekkao)
|| (tmp_key.keyval >= GDK_Hangul && tmp_key.keyval <= GDK_Hangul_Special)
|| (tmp_key.keyval >= GDK_Hangul_Kiyeog && tmp_key.keyval <= GDK_Hangul_J_YeorinHieuh)
|| keyval_is_forbidden (tmp_key.keyval))
{
GtkWidget *dialog;
char *name;
name = binding_name (keyval, keycode, mask, TRUE);
dialog = gtk_message_dialog_new (
GTK_WINDOW (gtk_widget_get_toplevel (GTK_WIDGET (view))),
GTK_DIALOG_DESTROY_WITH_PARENT | GTK_DIALOG_MODAL,
GTK_MESSAGE_WARNING,
GTK_BUTTONS_CANCEL,
_("The shortcut \"%s\" cannot be used because it will become impossible to type using this key.\n"
"Please try with a key such as Control, Alt or Shift at the same time."),
name);
g_free (name);
gtk_dialog_run (GTK_DIALOG (dialog));
gtk_widget_destroy (dialog);
/* set it back to its previous value. */
egg_cell_renderer_keys_set_accelerator(
EGG_CELL_RENDERER_KEYS(cell),
key_entry->keyval,
key_entry->keycode,
key_entry->mask);
return;
}
}
/* flag to see if the new accelerator was in use by something */
if (!tmp_key.editable)
{
GtkWidget* dialog;
char* name;
int response;
name = binding_name(keyval, keycode, mask, TRUE);
dialog = gtk_message_dialog_new(
GTK_WINDOW(gtk_widget_get_toplevel(GTK_WIDGET(view))),
GTK_DIALOG_DESTROY_WITH_PARENT | GTK_DIALOG_MODAL,
GTK_MESSAGE_WARNING,
GTK_BUTTONS_CANCEL,
_("The shortcut \"%s\" is already used for\n\"%s\""),
name,
tmp_key.description ? tmp_key.description : tmp_key.gsettings_key);
g_free (name);
gtk_message_dialog_format_secondary_text (
GTK_MESSAGE_DIALOG (dialog),
_("If you reassign the shortcut to \"%s\", the \"%s\" shortcut "
"will be disabled."),
key_entry->description ? key_entry->description : key_entry->gsettings_key,
tmp_key.description ? tmp_key.description : tmp_key.gsettings_key);
gtk_dialog_add_button(GTK_DIALOG (dialog), _("_Reassign"), GTK_RESPONSE_ACCEPT);
gtk_dialog_set_default_response(GTK_DIALOG (dialog), GTK_RESPONSE_ACCEPT);
response = gtk_dialog_run (GTK_DIALOG (dialog));
gtk_widget_destroy (dialog);
if (response == GTK_RESPONSE_ACCEPT)
{
g_settings_set_string (tmp_key.settings, tmp_key.gsettings_key, "disabled");
str = binding_name (keyval, keycode, mask, FALSE);
g_settings_set_string (key_entry->settings, key_entry->gsettings_key, str);
g_free (str);
}
else
{
/* set it back to its previous value. */
egg_cell_renderer_keys_set_accelerator(
EGG_CELL_RENDERER_KEYS(cell),
key_entry->keyval,
key_entry->keycode,
key_entry->mask);
}
return;
}
str = binding_name (keyval, keycode, mask, FALSE);
g_settings_set_string(
key_entry->settings,
key_entry->gsettings_key,
str);
g_free (str);
}
static void
accel_cleared_callback (GtkCellRendererText *cell,
const char *path_string,
gpointer data)
{
GtkTreeView *view = (GtkTreeView *) data;
GtkTreePath *path = gtk_tree_path_new_from_string (path_string);
KeyEntry *key_entry;
GtkTreeIter iter;
GtkTreeModel *model;
block_accels = FALSE;
model = gtk_tree_view_get_model (view);
gtk_tree_model_get_iter (model, &iter, path);
gtk_tree_path_free (path);
gtk_tree_model_get (model, &iter,
KEYENTRY_COLUMN, &key_entry,
-1);
/* sanity check */
if (key_entry == NULL)
return;
/* Unset the key */
g_settings_set_string (key_entry->settings,
key_entry->gsettings_key,
"disabled");
}
static void
description_edited_callback (GtkCellRendererText *renderer,
gchar *path_string,
gchar *new_text,
gpointer user_data)
{
GtkTreeView *view = GTK_TREE_VIEW (user_data);
GtkTreeModel *model;
GtkTreePath *path = gtk_tree_path_new_from_string (path_string);
GtkTreeIter iter;
KeyEntry *key_entry;
model = gtk_tree_view_get_model (view);
gtk_tree_model_get_iter (model, &iter, path);
gtk_tree_path_free (path);
gtk_tree_model_get (model, &iter,
KEYENTRY_COLUMN, &key_entry,
-1);
/* sanity check */
if (key_entry == NULL || key_entry->desc_gsettings_key == NULL)
return;
if (!g_settings_set_string (key_entry->settings, key_entry->desc_gsettings_key, new_text))
key_entry->desc_editable = FALSE;
}
typedef struct
{
GtkTreeView *tree_view;
GtkTreePath *path;
GtkTreeViewColumn *column;
} IdleData;
static gboolean
real_start_editing_cb (IdleData *idle_data)
{
gtk_widget_grab_focus (GTK_WIDGET (idle_data->tree_view));
gtk_tree_view_set_cursor (idle_data->tree_view,
idle_data->path,
idle_data->column,
TRUE);
gtk_tree_path_free (idle_data->path);
g_free (idle_data);
return FALSE;
}
static gboolean
edit_custom_shortcut (KeyEntry *key)
{
gint result;
const gchar *text;
gboolean ret;
gtk_entry_set_text (GTK_ENTRY (custom_shortcut_name_entry), key->description ? key->description : "");
gtk_widget_set_sensitive (custom_shortcut_name_entry, key->desc_editable);
gtk_widget_grab_focus (custom_shortcut_name_entry);
gtk_entry_set_text (GTK_ENTRY (custom_shortcut_command_entry), key->command ? key->command : "");
gtk_widget_set_sensitive (custom_shortcut_command_entry, key->cmd_editable);
gtk_window_present (GTK_WINDOW (custom_shortcut_dialog));
result = gtk_dialog_run (GTK_DIALOG (custom_shortcut_dialog));
switch (result)
{
case GTK_RESPONSE_OK:
text = gtk_entry_get_text (GTK_ENTRY (custom_shortcut_name_entry));
g_free (key->description);
key->description = g_strdup (text);
text = gtk_entry_get_text (GTK_ENTRY (custom_shortcut_command_entry));
g_free (key->command);
key->command = g_strdup (text);
ret = TRUE;
break;
default:
ret = FALSE;
break;
}
gtk_widget_hide (custom_shortcut_dialog);
return ret;
}
static gboolean
remove_custom_shortcut (GtkTreeModel *model, GtkTreeIter *iter)
{
GtkTreeIter parent;
KeyEntry *key;
gtk_tree_model_get (model, iter,
KEYENTRY_COLUMN, &key,
-1);
/* not a custom shortcut */
if (key->command == NULL)
return FALSE;
g_signal_handler_disconnect (key->settings, key->gsettings_cnxn);
if (key->gsettings_cnxn_desc != 0)
g_signal_handler_disconnect (key->settings, key->gsettings_cnxn_desc);
if (key->gsettings_cnxn_cmd != 0)
g_signal_handler_disconnect (key->settings, key->gsettings_cnxn_cmd);
dconf_util_recursive_reset (key->gsettings_path, NULL);
g_object_unref (key->settings);
g_free (key->gsettings_path);
g_free (key->gsettings_key);
g_free (key->description);
g_free (key->desc_gsettings_key);
g_free (key->command);
g_free (key->cmd_gsettings_key);
g_free (key);
gtk_tree_model_iter_parent (model, &parent, iter);
gtk_tree_store_remove (GTK_TREE_STORE (model), iter);
if (!gtk_tree_model_iter_has_child (model, &parent))
gtk_tree_store_remove (GTK_TREE_STORE (model), &parent);
return TRUE;
}
static void
update_custom_shortcut (GtkTreeModel *model, GtkTreeIter *iter)
{
KeyEntry *key;
gtk_tree_model_get (model, iter,
KEYENTRY_COLUMN, &key,
-1);
edit_custom_shortcut (key);
if (key->command == NULL || key->command[0] == '\0')
{
remove_custom_shortcut (model, iter);
}
else
{
gtk_tree_store_set (GTK_TREE_STORE (model), iter,
KEYENTRY_COLUMN, key, -1);
if (key->description != NULL)
g_settings_set_string (key->settings, key->desc_gsettings_key, key->description);
else
g_settings_reset (key->settings, key->desc_gsettings_key);
g_settings_set_string (key->settings, key->cmd_gsettings_key, key->command);
}
}
static gchar *
find_free_gsettings_path (GError **error)
{
gchar **existing_dirs;
gchar *dir = NULL;
gchar *fulldir = NULL;
int i;
int j;
gboolean found;
existing_dirs = dconf_util_list_subdirs (GSETTINGS_KEYBINDINGS_DIR, FALSE);
for (i = 0; i < MAX_CUSTOM_SHORTCUTS; i++)
{
found = TRUE;
dir = g_strdup_printf ("custom%d/", i);
for (j = 0; existing_dirs[j] != NULL; j++)
if (!g_strcmp0(dir, existing_dirs[j]))
{
found = FALSE;
g_free (dir);
break;
}
if (found)
break;
}
g_strfreev (existing_dirs);
  if (i == MAX_CUSTOM_SHORTCUTS)
    {
      g_free (dir);
      g_set_error_literal (error,
                           g_quark_from_string ("Keyboard Shortcuts"),
                           0,
                           _("Too many custom shortcuts"));
      /* don't build a path from a NULL dir; report the error instead */
      return NULL;
    }

  fulldir = g_strdup_printf ("%s%s", GSETTINGS_KEYBINDINGS_DIR, dir);
  g_free (dir);
  return fulldir;
}
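/* The resulting dconf layout is one relocatable CUSTOM_KEYBINDING_SCHEMA
 * instance per shortcut, e.g. /org/mate/desktop/keybindings/custom0/ with
 * the keys "name", "action" and "binding" used throughout this file. */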
static void
add_custom_shortcut (GtkTreeView *tree_view,
GtkTreeModel *model)
{
KeyEntry *key_entry;
GtkTreeIter iter;
GtkTreeIter parent_iter;
GtkTreePath *path;
gchar *dir;
GError *error;
error = NULL;
dir = find_free_gsettings_path (&error);
if (dir == NULL)
{
show_error (GTK_WINDOW (gtk_widget_get_toplevel (GTK_WIDGET (tree_view))), error);
g_error_free (error);
return;
}
key_entry = g_new0 (KeyEntry, 1);
key_entry->gsettings_path = g_strdup(dir);
key_entry->gsettings_key = g_strdup("binding");
key_entry->editable = TRUE;
key_entry->model = model;
key_entry->desc_gsettings_key = g_strdup("name");
key_entry->description = g_strdup ("");
key_entry->desc_editable = TRUE;
key_entry->cmd_gsettings_key = g_strdup("action");
key_entry->command = g_strdup ("");
key_entry->cmd_editable = TRUE;
g_free (dir);
if (edit_custom_shortcut (key_entry) &&
key_entry->command && key_entry->command[0])
{
find_section (model, &iter, _("Custom Shortcuts"));
parent_iter = iter;
gtk_tree_store_append (GTK_TREE_STORE (model), &iter, &parent_iter);
gtk_tree_store_set (GTK_TREE_STORE (model), &iter, KEYENTRY_COLUMN, key_entry, -1);
/* store in gsettings */
key_entry->settings = g_settings_new_with_path (CUSTOM_KEYBINDING_SCHEMA, key_entry->gsettings_path);
g_settings_set_string (key_entry->settings, key_entry->gsettings_key, "disabled");
g_settings_set_string (key_entry->settings, key_entry->desc_gsettings_key, key_entry->description);
g_settings_set_string (key_entry->settings, key_entry->cmd_gsettings_key, key_entry->command);
/* add gsettings watches */
key_entry->gsettings_cnxn_desc = g_signal_connect (key_entry->settings,
"changed::name",
G_CALLBACK (keybinding_description_changed),
key_entry);
key_entry->gsettings_cnxn_cmd = g_signal_connect (key_entry->settings,
"changed::action",
G_CALLBACK (keybinding_command_changed),
key_entry);
key_entry->gsettings_cnxn = g_signal_connect (key_entry->settings,
"changed::binding",
G_CALLBACK (keybinding_key_changed),
key_entry);
/* make the new shortcut visible */
path = gtk_tree_model_get_path (model, &iter);
gtk_tree_view_expand_to_path (tree_view, path);
gtk_tree_view_scroll_to_cell (tree_view, path, NULL, FALSE, 0, 0);
gtk_tree_path_free (path);
}
else
{
g_free (key_entry->gsettings_path);
g_free (key_entry->gsettings_key);
g_free (key_entry->description);
g_free (key_entry->desc_gsettings_key);
g_free (key_entry->command);
g_free (key_entry->cmd_gsettings_key);
g_free (key_entry);
}
}
static void
start_editing_kb_cb (GtkTreeView *treeview,
GtkTreePath *path,
GtkTreeViewColumn *column,
gpointer user_data)
{
GtkTreeModel *model;
GtkTreeIter iter;
KeyEntry *key;
model = gtk_tree_view_get_model (treeview);
gtk_tree_model_get_iter (model, &iter, path);
gtk_tree_model_get (model, &iter,
KEYENTRY_COLUMN, &key,
-1);
if (key == NULL)
{
/* This is a section heading - expand or collapse */
if (gtk_tree_view_row_expanded (treeview, path))
gtk_tree_view_collapse_row (treeview, path);
else
gtk_tree_view_expand_row (treeview, path, FALSE);
return;
}
/* if only the accel can be edited on the selected row
* always select the accel column */
if (key->desc_editable &&
column == gtk_tree_view_get_column (treeview, 0))
{
gtk_widget_grab_focus (GTK_WIDGET (treeview));
gtk_tree_view_set_cursor (treeview, path,
gtk_tree_view_get_column (treeview, 0),
FALSE);
update_custom_shortcut (model, &iter);
}
else
{
gtk_widget_grab_focus (GTK_WIDGET (treeview));
gtk_tree_view_set_cursor (treeview,
path,
gtk_tree_view_get_column (treeview, 1),
TRUE);
}
}
static gboolean
start_editing_cb (GtkTreeView *tree_view,
GdkEventButton *event,
gpointer user_data)
{
GtkTreePath *path;
GtkTreeViewColumn *column;
if (event->window != gtk_tree_view_get_bin_window (tree_view))
return FALSE;
if (gtk_tree_view_get_path_at_pos (tree_view,
(gint) event->x,
(gint) event->y,
&path, &column,
NULL, NULL))
{
IdleData *idle_data;
GtkTreeModel *model;
GtkTreeIter iter;
KeyEntry *key;
if (gtk_tree_path_get_depth (path) == 1)
{
gtk_tree_path_free (path);
return FALSE;
}
model = gtk_tree_view_get_model (tree_view);
gtk_tree_model_get_iter (model, &iter, path);
gtk_tree_model_get (model, &iter,
KEYENTRY_COLUMN, &key,
-1);
/* if only the accel can be edited on the selected row
* always select the accel column */
if (key->desc_editable &&
column == gtk_tree_view_get_column (tree_view, 0))
{
gtk_widget_grab_focus (GTK_WIDGET (tree_view));
gtk_tree_view_set_cursor (tree_view, path,
gtk_tree_view_get_column (tree_view, 0),
FALSE);
update_custom_shortcut (model, &iter);
}
else
{
idle_data = g_new (IdleData, 1);
idle_data->tree_view = tree_view;
idle_data->path = path;
idle_data->column = key->desc_editable ? column :
gtk_tree_view_get_column (tree_view, 1);
g_idle_add ((GSourceFunc) real_start_editing_cb, idle_data);
block_accels = TRUE;
}
g_signal_stop_emission_by_name (tree_view, "button_press_event");
}
return TRUE;
}
/* this handler is used to keep accels from activating while the user
* is assigning a new shortcut so that he won't accidentally trigger one
* of the widgets */
static gboolean maybe_block_accels(GtkWidget* widget, GdkEventKey* event, gpointer user_data)
{
if (block_accels)
{
return gtk_window_propagate_key_event(GTK_WINDOW(widget), event);
}
return FALSE;
}
static void
cb_dialog_response (GtkWidget *widget, gint response_id, gpointer data)
{
GtkBuilder *builder = data;
GtkTreeView *treeview;
GtkTreeModel *model;
GtkTreeSelection *selection;
GtkTreeIter iter;
treeview = GTK_TREE_VIEW (gtk_builder_get_object (builder,
"shortcut_treeview"));
model = gtk_tree_view_get_model (treeview);
if (response_id == GTK_RESPONSE_HELP)
{
capplet_help (GTK_WINDOW (widget),
"goscustdesk-39");
}
else if (response_id == RESPONSE_ADD)
{
add_custom_shortcut (treeview, model);
}
else if (response_id == RESPONSE_REMOVE)
{
selection = gtk_tree_view_get_selection (treeview);
if (gtk_tree_selection_get_selected (selection, NULL, &iter))
{
remove_custom_shortcut (model, &iter);
}
}
else
{
clear_old_model (builder);
gtk_main_quit ();
}
}
static void
selection_changed (GtkTreeSelection *selection, gpointer data)
{
GtkWidget *button = data;
GtkTreeModel *model;
GtkTreeIter iter;
KeyEntry *key;
gboolean can_remove;
can_remove = FALSE;
if (gtk_tree_selection_get_selected (selection, &model, &iter))
{
gtk_tree_model_get (model, &iter, KEYENTRY_COLUMN, &key, -1);
if (key && key->command != NULL && key->editable)
can_remove = TRUE;
}
gtk_widget_set_sensitive (button, can_remove);
}
static void
setup_dialog (GtkBuilder *builder, GSettings *marco_settings)
{
GtkCellRenderer *renderer;
GtkTreeViewColumn *column;
GtkWidget *widget;
GtkTreeView *treeview;
GtkTreeSelection *selection;
treeview = GTK_TREE_VIEW (gtk_builder_get_object (builder,
"shortcut_treeview"));
g_signal_connect (treeview, "button_press_event",
G_CALLBACK (start_editing_cb), builder);
g_signal_connect (treeview, "row-activated",
G_CALLBACK (start_editing_kb_cb), NULL);
renderer = gtk_cell_renderer_text_new ();
g_signal_connect (renderer, "edited",
G_CALLBACK (description_edited_callback),
treeview);
column = gtk_tree_view_column_new_with_attributes (_("Action"),
renderer,
"text", DESCRIPTION_COLUMN,
NULL);
gtk_tree_view_column_set_cell_data_func (column, renderer, description_set_func, NULL, NULL);
gtk_tree_view_column_set_resizable (column, FALSE);
gtk_tree_view_append_column (treeview, column);
gtk_tree_view_column_set_sort_column_id (column, DESCRIPTION_COLUMN);
renderer = (GtkCellRenderer *) g_object_new (EGG_TYPE_CELL_RENDERER_KEYS,
"accel_mode", EGG_CELL_RENDERER_KEYS_MODE_X,
NULL);
g_signal_connect (renderer, "accel_edited",
G_CALLBACK (accel_edited_callback),
treeview);
g_signal_connect (renderer, "accel_cleared",
G_CALLBACK (accel_cleared_callback),
treeview);
column = gtk_tree_view_column_new_with_attributes (_("Shortcut"), renderer, NULL);
gtk_tree_view_column_set_cell_data_func (column, renderer, accel_set_func, NULL, NULL);
gtk_tree_view_column_set_resizable (column, FALSE);
gtk_tree_view_append_column (treeview, column);
gtk_tree_view_column_set_sort_column_id (column, KEYENTRY_COLUMN);
g_signal_connect (marco_settings,
"changed::num-workspaces",
G_CALLBACK (key_entry_controlling_key_changed),
builder);
/* set up the dialog */
reload_key_entries (builder);
#if GTK_CHECK_VERSION(3, 0, 0)
widget = _gtk_builder_get_widget (builder, "mate-keybinding-dialog");
gtk_window_set_default_size (GTK_WINDOW (widget), 400, 500);
widget = _gtk_builder_get_widget (builder, "label-suggest");
gtk_label_set_line_wrap (GTK_LABEL (widget), TRUE);
gtk_label_set_max_width_chars (GTK_LABEL (widget), 60);
#endif
widget = _gtk_builder_get_widget (builder, "mate-keybinding-dialog");
capplet_set_icon (widget, "preferences-desktop-keyboard-shortcuts");
gtk_widget_show (widget);
g_signal_connect (widget, "key_press_event", G_CALLBACK (maybe_block_accels), NULL);
g_signal_connect (widget, "response", G_CALLBACK (cb_dialog_response), builder);
selection = gtk_tree_view_get_selection (GTK_TREE_VIEW (treeview));
g_signal_connect (selection, "changed",
G_CALLBACK (selection_changed),
_gtk_builder_get_widget (builder, "remove-button"));
/* setup the custom shortcut dialog */
custom_shortcut_dialog = _gtk_builder_get_widget (builder,
"custom-shortcut-dialog");
custom_shortcut_name_entry = _gtk_builder_get_widget (builder,
"custom-shortcut-name-entry");
custom_shortcut_command_entry = _gtk_builder_get_widget (builder,
"custom-shortcut-command-entry");
gtk_dialog_set_default_response (GTK_DIALOG (custom_shortcut_dialog),
GTK_RESPONSE_OK);
gtk_window_set_transient_for (GTK_WINDOW (custom_shortcut_dialog),
GTK_WINDOW (widget));
}
static void
on_window_manager_change (const char *wm_name, GtkBuilder *builder)
{
reload_key_entries (builder);
}
int
main (int argc, char *argv[])
{
GtkBuilder *builder;
GSettings *marco_settings;
gtk_init (&argc, &argv);
bindtextdomain (GETTEXT_PACKAGE, MATELOCALEDIR);
bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8");
textdomain (GETTEXT_PACKAGE);
activate_settings_daemon ();
builder = create_builder ();
if (!builder) /* Warning was already printed to console */
exit (EXIT_FAILURE);
wm_common_register_window_manager_change ((GFunc) on_window_manager_change, builder);
marco_settings = g_settings_new ("org.mate.Marco.general");
setup_dialog (builder, marco_settings);
gtk_main ();
g_object_unref (marco_settings);
g_object_unref (builder);
return 0;
}
/*
* vim: sw=2 ts=8 cindent noai bs=2
*/
| City-busz/mate-control-center | capplets/keybindings/mate-keybinding-properties.c | C | gpl-2.0 | 58,766 |
/*
* BackFS Filesystem Cache
* Copyright (c) 2010-2014 William R. Fraser
*/
#define _XOPEN_SOURCE 500 // for pread()
#include "fscache.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/types.h>
#include <dirent.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <pthread.h>
static pthread_mutex_t lock;
#define BACKFS_LOG_SUBSYS "Cache"
#include "global.h"
#include "fsll.h"
#include "util.h"
extern int backfs_log_level;
extern bool backfs_log_stderr;
static char *cache_dir;
static uint64_t cache_size;
static volatile uint64_t cache_used_size = 0;
struct bucket_node { uint32_t number; struct bucket_node* next; };
static struct bucket_node * volatile to_check;
static bool use_whole_device;
static uint64_t bucket_max_size;
uint64_t prepare_buckets_size_check(const char *root)
{
INFO("taking inventory of cache directory\n");
uint64_t total = 0;
struct dirent *e = malloc(offsetof(struct dirent, d_name) + PATH_MAX + 1);
struct dirent *result = e;
    DIR *dir = opendir(root);
    struct bucket_node* volatile * next = &to_check;
    if (dir == NULL) {
        PERROR("opendir in prepare_buckets_size_check");
        ERROR("\tcaused by opendir(%s)\n", root);
        FREE(e);
        return 0;
    }
    while (readdir_r(dir, e, &result) == 0 && result != NULL) {
if (e->d_name[0] < '0' || e->d_name[0] > '9') continue;
*next = (struct bucket_node*)malloc(sizeof(struct bucket_node));
(*next)->number = atoi(e->d_name);
next = &((*next)->next);
*next = NULL;
++total;
}
closedir(dir);
FREE(e);
return total;
}
/*
* returns the bucket number corresponding to a bucket path
* i.e. reads the number off the end.
*/
uint32_t bucket_path_to_number(const char *bucketpath)
{
uint32_t number = 0;
size_t s = strlen(bucketpath);
size_t i;
for (i = 1; i < s; i++) {
char c = bucketpath[s - i];
if (c < '0' || c > '9') {
i--;
break;
}
}
for (i = s - i; i < s; i++) {
number *= 10;
number += (bucketpath[i] - '0');
}
return number;
}
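/*
 * Example of the scan above (added for illustration; the path is made up):
 * the first loop walks the string right-to-left until it leaves the
 * trailing digit run, the second parses those digits left-to-right, so
 * bucket_path_to_number("/cache/buckets/1234") == 1234.
 */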
bool is_unchecked(const char* path)
{
uint32_t number = bucket_path_to_number(path);
struct bucket_node* node = to_check;
while(node) {
if (node->number == number)
return true;
node = node->next;
}
return false;
}
void* check_buckets_size(void* arg)
{
INFO("starting cache size check\n");
struct stat s;
struct bucket_node* bucket;
if (arg != NULL) {
abort();
}
char buf[PATH_MAX];
while (to_check) {
pthread_mutex_lock(&lock);
bucket = to_check;
if (bucket) {
s.st_size = 0;
snprintf(buf, PATH_MAX, "%s/buckets/%u/data", cache_dir, bucket->number);
if (stat(buf, &s) == -1 && errno != ENOENT) {
PERROR("stat in get_cache_used_size");
ERROR("\tcaused by stat(%s)\n", buf);
abort();
}
DEBUG("bucket %u: %llu bytes\n",
bucket->number, (unsigned long long) s.st_size);
cache_used_size -= bucket_max_size - s.st_size;
to_check = bucket->next;
}
pthread_mutex_unlock(&lock);
free(bucket);
}
    INFO("finished cache size check: %llu bytes used\n",
        (unsigned long long) cache_used_size);
return NULL;
}
uint64_t get_cache_fs_free_size(const char *root)
{
struct statvfs s;
if (statvfs(root, &s) == -1) {
PERROR("statfs in get_cache_fs_free_size");
return 0;
}
uint64_t dev_free = (uint64_t) s.f_bavail * s.f_bsize;
return dev_free;
}
/*
* Initialize the cache.
*/
void cache_init(const char *a_cache_dir, uint64_t a_cache_size, uint64_t a_bucket_max_size)
{
cache_dir = (char*)malloc(strlen(a_cache_dir)+1);
strcpy(cache_dir, a_cache_dir);
cache_size = a_cache_size;
use_whole_device = (cache_size == 0);
char bucket_dir[PATH_MAX];
snprintf(bucket_dir, PATH_MAX, "%s/buckets", cache_dir);
uint64_t number_of_buckets = prepare_buckets_size_check(bucket_dir);
INFO("%llu buckets in cache dir\n",
(unsigned long long) number_of_buckets);
cache_used_size = number_of_buckets * a_bucket_max_size;
INFO("Estimated %llu bytes used in cache dir\n",
(unsigned long long) cache_used_size);
uint64_t cache_free_size = get_cache_fs_free_size(bucket_dir);
INFO("%llu bytes free in cache dir\n",
(unsigned long long) cache_free_size);
bucket_max_size = a_bucket_max_size;
if (number_of_buckets > 0) {
pthread_t thread;
if (pthread_create(&thread, NULL, &check_buckets_size, NULL) != 0) {
PERROR("cache_init: error creating checked thread");
abort();
}
}
}
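/*
 * Worked example of the size accounting above (numbers invented): with
 * 1000 buckets on disk and bucket_max_size == 1 MiB, cache_used_size
 * starts out at 1000 MiB.  The background check_buckets_size() thread
 * then subtracts (bucket_max_size - actual data size) for each bucket,
 * so a bucket holding only 256 KiB lowers the estimate by 768 KiB.
 */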
const char * bucketname(const char *path)
{
return fsll_basename(path);
}
void dump_queues()
{
#ifdef FSLL_DUMP
fprintf(stderr, "BackFS Used Bucket Queue:\n");
fsll_dump(cache_dir, "buckets/head", "buckets/tail");
fprintf(stderr, "BackFS Free Bucket Queue:\n");
fsll_dump(cache_dir, "buckets/free_head", "buckets/free_tail");
#endif //FSLL_DUMP
}
/*
* don't use this function directly.
*/
char * makebucket(uint64_t number)
{
char *new_bucket = fsll_make_entry(cache_dir, "buckets", number);
fsll_insert_as_head(cache_dir, new_bucket,
"buckets/head", "buckets/tail");
return new_bucket;
}
/*
* make a new bucket
*
* either re-use one from the free queue,
* or increment the next_bucket_number file and return that.
*
* If one from the free queue is returned, that bucket is made the head of the
* used queue.
*/
char * next_bucket(void)
{
char *bucket = fsll_getlink(cache_dir, "buckets/free_head");
if (bucket != NULL) {
DEBUG("re-using free bucket %s\n", bucketname(bucket));
// disconnect from free queue
fsll_disconnect(cache_dir, bucket,
"buckets/free_head", "buckets/free_tail");
// make head of the used queue
fsll_insert_as_head(cache_dir, bucket,
"buckets/head", "buckets/tail");
return bucket;
} else {
char nbnpath[PATH_MAX];
snprintf(nbnpath, PATH_MAX, "%s/buckets/next_bucket_number", cache_dir);
uint64_t next = 0;
FILE *f = fopen(nbnpath, "r+");
if (f == NULL && errno != ENOENT) {
PERROR("open next_bucket");
return makebucket(0);
} else {
if (f != NULL) {
// we had a number already there; read it
if (fscanf(f, "%llu", (unsigned long long *)&next) != 1) {
ERROR("unable to read next_bucket\n");
fclose(f);
return makebucket(0);
}
f = freopen(nbnpath, "w+", f);
} else {
// next_bucket_number doesn't exist; create it and write a 1.
f = fopen(nbnpath, "w+");
if (f == NULL) {
PERROR("open next_bucket again");
return makebucket(0);
}
}
// write the next number
if (f == NULL) {
PERROR("fdopen for writing in next_bucket");
return makebucket(0);
}
fprintf(f, "%llu\n", (unsigned long long) next+1);
fclose(f);
}
DEBUG("making new bucket %lu\n", (unsigned long) next);
char *new_bucket = makebucket(next);
return new_bucket;
}
}
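/*
 * Hypothetical caller sketch (not part of the original source), showing
 * the intended use of next_bucket(); the real caller is cache_add().
 */
#if 0
static void example_allocate_bucket(void)
{
    char *bucket = next_bucket();   /* reused free bucket, or a new number */
    DEBUG("got bucket %s\n", bucketname(bucket));
    /* ... write <bucket>/data and link <bucket>/parent here ... */
    FREE(bucket);
}
#endif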
/*
* moves a bucket to the head of the used queue
*/
void bucket_to_head(const char *bucketpath)
{
DEBUG("bucket_to_head(%s)\n", bucketpath);
fsll_to_head(cache_dir, bucketpath, "buckets/head", "buckets/tail");
}
/*
* Starting at the dirname of path, remove empty directories upwards in the
 * path hierarchy.
*
* Stops when it gets to <cache_dir>/buckets or <cache_dir>/map
*/
void trim_directory(const char *path)
{
size_t len = strlen(path);
char *copy = (char*)malloc(len+1);
strncpy(copy, path, len+1);
char map[PATH_MAX];
char buckets[PATH_MAX];
snprintf(map, PATH_MAX, "%s/map", cache_dir);
snprintf(buckets, PATH_MAX, "%s/buckets", cache_dir);
char *dir = dirname(copy);
while ((strcmp(dir, map) != 0) && (strcmp(dir, buckets) != 0)) {
DIR *d = opendir(dir);
struct dirent *e;
bool found_mtime = false;
while ((e = readdir(d)) != NULL) {
if (e->d_name[0] == '.')
continue;
// remove mtime files, if found
if (strcmp(e->d_name, "mtime") == 0) {
struct stat s;
char mtime[PATH_MAX];
snprintf(mtime, PATH_MAX, "%s/mtime", dir);
stat(mtime, &s);
            if (S_ISREG(s.st_mode)) {
found_mtime = true;
continue;
}
}
// if we got here, the directory has entries
DEBUG("directory has entries -- in %s found '%s'\n", dir, e->d_name);
closedir(d);
FREE(copy);
return;
}
if (found_mtime) {
char mtime[PATH_MAX];
snprintf(mtime, PATH_MAX, "%s/mtime", dir);
if (unlink(mtime) == -1) {
PERROR("in trim_directory, unable to unlink mtime file");
ERROR("\tpath was %s\n", mtime);
} else {
DEBUG("removed mtime file %s/mtime\n", dir);
}
}
closedir(d);
d = NULL;
int result = rmdir(dir);
if (result == -1) {
if (errno != EEXIST && errno != ENOTEMPTY) {
PERROR("in trim_directory, rmdir");
}
WARN("in trim_directory, directory still not empty, but how? path was %s\n", dir);
FREE(copy);
return;
} else {
DEBUG("removed empty map directory %s\n", dir);
}
dir = dirname(dir);
}
FREE(copy);
}
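/*
 * Example of the intended behaviour (illustrative; the paths are made up):
 * after the last mapping under /cache/map/music/album is unlinked,
 * trim_directory("/cache/map/music/album/3") removes the now-empty
 * "album" directory (and its mtime file), then "music", and stops once it
 * reaches /cache/map, which is never removed.
 */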
/*
* free a bucket
*
* moves bucket from the tail of the used queue to the tail of the free queue,
* deletes the data in the bucket
* returns the size of the data deleted
*/
uint64_t free_bucket_real(const char *bucketpath, bool free_in_the_middle_is_bad)
{
char *parent = fsll_getlink(bucketpath, "parent");
if (parent && fsll_file_exists(parent, NULL)) {
DEBUG("bucket parent: %s\n", parent);
if (unlink(parent) == -1) {
PERROR("unlink parent in free_bucket");
}
// if this was the last block, remove the directory
trim_directory(parent);
}
FREE(parent);
fsll_makelink(bucketpath, "parent", NULL);
if (free_in_the_middle_is_bad) {
char *n = fsll_getlink(bucketpath, "next");
if (n != NULL) {
ERROR("bucket freed (#%lu) was not the queue tail\n",
(unsigned long) bucket_path_to_number(bucketpath));
FREE(n);
return 0;
}
}
fsll_disconnect(cache_dir, bucketpath,
"buckets/head", "buckets/tail");
fsll_insert_as_tail(cache_dir, bucketpath,
"buckets/free_head", "buckets/free_tail");
char data[PATH_MAX];
snprintf(data, PATH_MAX, "%s/data", bucketpath);
struct stat s;
if (stat(data, &s) == -1) {
PERROR("stat data in free_bucket");
}
pthread_mutex_lock(&lock);
uint64_t result = 0;
if (unlink(data) == -1) {
PERROR("unlink data in free_bucket");
} else {
result = (uint64_t) s.st_size;
if (!is_unchecked(bucketpath)) {
cache_used_size -= result;
}
}
pthread_mutex_unlock(&lock);
return result;
}
uint64_t free_bucket_mid_queue(const char *bucketpath)
{
return free_bucket_real(bucketpath, false);
}
uint64_t free_bucket(const char *bucketpath)
{
return free_bucket_real(bucketpath, true);
}
/*
* do not use this function directly
*/
int cache_invalidate_bucket(const char *filename, uint32_t block,
const char *bucket)
{
DEBUG("invalidating block %lu of file %s\n",
(unsigned long) block, filename);
uint64_t freed_size = free_bucket_mid_queue(bucket);
DEBUG("freed %llu bytes in bucket %s\n",
(unsigned long long) freed_size,
bucketname(bucket));
return 0;
}
int cache_invalidate_file_real(const char *filename, bool error_if_not_exist)
{
char mappath[PATH_MAX];
snprintf(mappath, PATH_MAX, "%s/map%s", cache_dir, filename);
DIR *d = opendir(mappath);
if (d == NULL) {
if (errno != ENOENT || error_if_not_exist) {
PERROR("opendir in cache_invalidate");
}
return -errno;
}
struct dirent *e = malloc(offsetof(struct dirent, d_name) + PATH_MAX + 1);
struct dirent *result = e;
while (readdir_r(d, e, &result) == 0 && result != NULL) {
// probably not needed, because trim_directory would take care of the
// mtime file, but might as well do it now to save time.
if (strcmp(e->d_name, "mtime") == 0) {
char mtime[PATH_MAX];
snprintf(mtime, PATH_MAX, "%s/mtime", mappath);
DEBUG("removed mtime file %s\n", mtime);
unlink(mtime);
continue;
}
if (e->d_name[0] < '0' || e->d_name[0] > '9') continue;
char *bucket = fsll_getlink(mappath, e->d_name);
        unsigned long block;
        sscanf(e->d_name, "%lu", &block);
cache_invalidate_bucket(filename, block, bucket);
FREE(bucket);
}
FREE(e);
closedir(d);
return 0;
}
int cache_invalidate_file_(const char *filename, bool error_if_not_exist)
{
pthread_mutex_lock(&lock);
int retval = cache_invalidate_file_real(filename, error_if_not_exist);
pthread_mutex_unlock(&lock);
return retval;
}
int cache_invalidate_file(const char *filename)
{
return cache_invalidate_file_(filename, true);
}
int cache_try_invalidate_file(const char *filename)
{
return cache_invalidate_file_(filename, false);
}
int cache_invalidate_block_(const char *filename, uint32_t block,
bool warn_if_not_exist)
{
char mappath[PATH_MAX];
snprintf(mappath, PATH_MAX, "map%s/%lu",
filename, (unsigned long) block);
pthread_mutex_lock(&lock);
char *bucket = fsll_getlink(cache_dir, mappath);
if (bucket == NULL) {
if (warn_if_not_exist) {
WARN("Cache invalidation: block %lu of file %s doesn't exist.\n",
(unsigned long) block, filename);
}
pthread_mutex_unlock(&lock);
return -ENOENT;
}
cache_invalidate_bucket(filename, block, bucket);
FREE(bucket);
pthread_mutex_unlock(&lock);
return 0;
}
int cache_invalidate_block(const char *filename, uint32_t block)
{
return cache_invalidate_block_(filename, block, true);
}
int cache_try_invalidate_block(const char *filename, uint32_t block)
{
return cache_invalidate_block_(filename, block, false);
}
int cache_try_invalidate_blocks_above(const char *filename, uint32_t block)
{
    DEBUG("trying to invalidate blocks >= %lu in %s\n",
        (unsigned long) block, filename);
int ret = 0;
DIR *mapdir = NULL;
bool locked = false;
struct dirent *e = NULL;
char mappath[PATH_MAX];
snprintf(mappath, PATH_MAX, "%s/map%s", cache_dir, filename);
pthread_mutex_lock(&lock);
locked = true;
mapdir = opendir(mappath);
if (mapdir == NULL) {
ret = -errno;
goto exit;
}
e = malloc(offsetof(struct dirent, d_name) + PATH_MAX + 1);
struct dirent *result = e;
while ((readdir_r(mapdir, e, &result) == 0) && (result != NULL)) {
if ((e->d_name[0] < '0') || (e->d_name[0] > '9')) continue;
        unsigned long block_found;
        sscanf(e->d_name, "%lu", &block_found);
if (block_found >= block) {
char *bucket = fsll_getlink(mappath, e->d_name);
cache_invalidate_bucket(filename, block_found, bucket);
FREE(bucket);
}
}
exit:
    if (mapdir)
        closedir(mapdir);
FREE(e);
if (locked)
pthread_mutex_unlock(&lock);
return ret;
}
int cache_free_orphan_buckets(void)
{
char bucketdir[PATH_MAX];
snprintf(bucketdir, PATH_MAX, "%s/buckets", cache_dir);
pthread_mutex_lock(&lock);
DIR *d = opendir(bucketdir);
if (d == NULL) {
PERROR("opendir in cache_free_orphan_buckets");
pthread_mutex_unlock(&lock);
return -1*errno;
}
struct dirent *e = malloc(offsetof(struct dirent, d_name) + PATH_MAX + 1);
struct dirent *result = e;
while (readdir_r(d, e, &result) == 0 && result != NULL) {
if (e->d_name[0] < '0' || e->d_name[0] > '9') continue;
char bucketpath[PATH_MAX];
snprintf(bucketpath, PATH_MAX, "%s/buckets/%s", cache_dir, e->d_name);
char *parent = fsll_getlink(bucketpath, "parent");
if (fsll_file_exists(bucketpath, "data") &&
(parent == NULL || !fsll_file_exists(parent, NULL))) {
DEBUG("bucket %s is an orphan\n", e->d_name);
if (parent) {
DEBUG("\tparent was %s\n", parent);
}
free_bucket_mid_queue(bucketpath);
}
FREE(parent);
}
closedir(d);
FREE(e);
pthread_mutex_unlock(&lock);
return 0;
}
/*
* Read a block from the cache.
* Important: you can specify less than one block, but not more.
* Nor can a read be across block boundaries.
*
* mtime is the file modification time. If what's in the cache doesn't match
* this, the cache data is invalidated and this function returns -1 and sets
* ENOENT.
*
* Returns 0 on success.
* On error returns -1 and sets errno.
* In particular, if the block is not in the cache, sets ENOENT
*/
int cache_fetch(const char *filename, uint32_t block, uint64_t offset,
char *buf, uint64_t len, uint64_t *bytes_read, time_t mtime)
{
if (offset + len > bucket_max_size || filename == NULL) {
errno = EINVAL;
return -1;
}
if (len == 0) {
*bytes_read = 0;
return 0;
}
DEBUG("getting block %lu of file %s\n", (unsigned long) block, filename);
//###
pthread_mutex_lock(&lock);
char mapfile[PATH_MAX];
snprintf(mapfile, PATH_MAX, "%s/map%s/%lu",
cache_dir, filename, (unsigned long) block);
char bucketpath[PATH_MAX];
ssize_t bplen;
if ((bplen = readlink(mapfile, bucketpath, PATH_MAX-1)) == -1) {
if (errno == ENOENT || errno == ENOTDIR) {
DEBUG("block not in cache\n");
errno = ENOENT;
pthread_mutex_unlock(&lock);
return -1;
} else {
PERROR("readlink error");
errno = EIO;
pthread_mutex_unlock(&lock);
return -1;
}
}
bucketpath[bplen] = '\0';
bucket_to_head(bucketpath);
uint64_t bucket_mtime;
char mtimepath[PATH_MAX];
snprintf(mtimepath, PATH_MAX, "%s/map%s/mtime", cache_dir, filename);
FILE *f = fopen(mtimepath, "r");
if (f == NULL) {
PERROR("open mtime file failed");
bucket_mtime = 0; // will cause invalidation
} else {
if (fscanf(f, "%llu", (unsigned long long *) &bucket_mtime) != 1) {
ERROR("error reading mtime file");
// debug
char buf[4096];
fseek(f, 0, SEEK_SET);
size_t b = fread(buf, 1, 4096, f);
buf[b] = '\0';
ERROR("mtime file contains: %u bytes: %s", (unsigned int) b, buf);
fclose(f);
f = NULL;
unlink(mtimepath);
bucket_mtime = 0; // will cause invalidation
}
}
if (f) fclose(f);
if (bucket_mtime != (uint64_t)mtime) {
// mtime mismatch; invalidate and return
if (bucket_mtime < (uint64_t)mtime) {
DEBUG("cache data is %llu seconds older than the backing data\n",
(unsigned long long) mtime - bucket_mtime);
} else {
DEBUG("cache data is %llu seconds newer than the backing data\n",
(unsigned long long) bucket_mtime - mtime);
}
cache_invalidate_file_real(filename, true);
errno = ENOENT;
pthread_mutex_unlock(&lock);
return -1;
}
// [cache_dir]/buckets/%lu/data
char bucketdata[PATH_MAX];
snprintf(bucketdata, PATH_MAX, "%s/data", bucketpath);
uint64_t size = 0;
struct stat stbuf;
if (stat(bucketdata, &stbuf) == -1) {
PERROR("stat on bucket error");
errno = EIO;
pthread_mutex_unlock(&lock);
return -1;
}
size = (uint64_t) stbuf.st_size;
if (size < offset) {
WARN("offset for read is past the end: %llu vs %llu, bucket %s\n",
(unsigned long long) offset,
(unsigned long long) size,
bucketname(bucketpath));
pthread_mutex_unlock(&lock);
*bytes_read = 0;
return 0;
}
/*
if (e->bucket->size - offset < len) {
WARN("length + offset for read is past the end\n");
errno = ENXIO;
FREE(f);
pthread_mutex_unlock(&lock);
return -1;
}
*/
int fd = open(bucketdata, O_RDONLY);
if (fd == -1) {
PERROR("error opening file from cache dir");
errno = EBADF;
pthread_mutex_unlock(&lock);
return -1;
}
    ssize_t nread = pread(fd, buf, len, offset);
    if (nread == -1) {
        PERROR("error reading file from cache dir");
        errno = EIO;
        close(fd);
        pthread_mutex_unlock(&lock);
        return -1;
    }
    *bytes_read = (uint64_t) nread;
    if (*bytes_read != len) {
DEBUG("read fewer than requested bytes from cache file: %llu instead of %llu\n",
(unsigned long long) *bytes_read,
(unsigned long long) len
);
/*
errno = EIO;
FREE(f);
FREE(cachefile);
close(fd);
pthread_mutex_unlock(&lock);
return -1;
*/
}
close(fd);
pthread_mutex_unlock(&lock);
//###
return 0;
}
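/*
 * Hypothetical caller sketch (added for illustration; the real callers
 * live elsewhere in BackFS).  It follows the contract documented above:
 * reads never cross a block boundary, and ENOENT means "not cached".
 */
#if 0
static int example_read_block(const char *path, uint32_t block,
                              char *buf, uint64_t len, time_t mtime,
                              uint64_t *out_read)
{
    if (cache_fetch(path, block, 0, buf, len, out_read, mtime) == -1) {
        if (errno == ENOENT)
            return 0;   /* miss: read the backing file, then cache_add() */
        return -1;      /* real I/O error */
    }
    return 1;           /* hit: *out_read bytes copied into buf */
}
#endif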
uint64_t free_tail_bucket()
{
uint64_t freed_bytes = 0;
char *tail = fsll_getlink(cache_dir, "buckets/tail");
if (tail == NULL) {
ERROR("can't free the tail bucket, no buckets in queue!\n");
goto exit;
}
freed_bytes = free_bucket(tail);
DEBUG("freed %llu bytes in bucket %lu\n",
(unsigned long long)freed_bytes,
(unsigned long)bucket_path_to_number(tail));
exit:
FREE(tail);
return freed_bytes;
}
void make_space_available(uint64_t bytes_needed)
{
uint64_t bytes_freed = 0;
if (bytes_needed == 0)
return;
uint64_t dev_free = get_cache_fs_free_size(cache_dir);
if (dev_free >= bytes_needed) {
// device has plenty
if (use_whole_device) {
return;
} else {
// cache_size is limiting factor
if (cache_used_size + bytes_needed <= cache_size) {
return;
} else {
bytes_needed = (cache_used_size + bytes_needed) - cache_size;
}
}
} else {
// dev_free is limiting factor
bytes_needed = bytes_needed - dev_free;
}
DEBUG("need to free %llu bytes\n",
(unsigned long long) bytes_needed);
while (bytes_freed < bytes_needed) {
bytes_freed += free_tail_bucket();
}
DEBUG("freed %llu bytes total\n",
(unsigned long long) bytes_freed);
}
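/*
 * Worked example of the arithmetic above (numbers invented): with
 * cache_size == 100 MiB, cache_used_size == 98 MiB, 500 MiB free on the
 * device and bytes_needed == 4 MiB, the device has room, so the
 * configured cache size is the limiting factor and the loop frees
 * (98 + 4) - 100 == 2 MiB worth of tail buckets before returning.
 */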
/*
* Adds a data block to the cache.
* Important: this must be the FULL block. All subsequent reads will
* assume that the full block is here.
*/
int cache_add(const char *filename, uint32_t block, const char *buf,
uint64_t len, time_t mtime)
{
if (len > bucket_max_size) {
errno = EOVERFLOW;
return -1;
}
if (len == 0) {
return 0;
}
char fileandblock[PATH_MAX];
snprintf(fileandblock, PATH_MAX, "map%s/%lu", filename, (unsigned long) block);
DEBUG("writing %llu bytes to %s\n", (unsigned long long) len, fileandblock);
//###
pthread_mutex_lock(&lock);
char *bucketpath = fsll_getlink(cache_dir, fileandblock);
if (bucketpath != NULL) {
if (fsll_file_exists(bucketpath, "data")) {
WARN("data already exists in cache\n");
FREE(bucketpath);
pthread_mutex_unlock(&lock);
return 0;
}
}
char *filemap = (char*)malloc(strlen(filename) + 4);
snprintf(filemap, strlen(filename)+4, "map%s", filename);
char *full_filemap_dir = (char*)malloc(strlen(cache_dir) + 5 + strlen(filename) + 1);
snprintf(full_filemap_dir, strlen(cache_dir)+5+strlen(filename)+1, "%s/map%s/",
cache_dir, filename);
DEBUG("map file = %s\n", filemap);
DEBUG("full filemap dir = %s\n", full_filemap_dir);
if (!fsll_file_exists(cache_dir, filemap)) {
FREE(filemap);
size_t i;
// start from "$cache_dir/map/"
for (i = strlen(cache_dir) + 5; i < strlen(full_filemap_dir); i++) {
if (full_filemap_dir[i] == '/') {
char *component = (char*)malloc(i+1);
strncpy(component, full_filemap_dir, i+1);
component[i] = '\0';
DEBUG("making %s\n", component);
if(mkdir(component, 0700) == -1 && errno != EEXIST) {
if (errno == ENOSPC) {
// try to free some space
DEBUG("mkdir says ENOSPC, freeing and trying again\n");
free_tail_bucket();
errno = EAGAIN;
}
else {
PERROR("mkdir in cache_add");
ERROR("\tcaused by mkdir(%s)\n", component);
errno = EIO;
}
FREE(component);
FREE(full_filemap_dir);
pthread_mutex_unlock(&lock);
return -1;
}
FREE(component);
}
}
} else {
FREE(filemap);
}
FREE(full_filemap_dir);
make_space_available(len);
FREE(bucketpath);
bucketpath = next_bucket();
DEBUG("bucket path = %s\n", bucketpath);
fsll_makelink(cache_dir, fileandblock, bucketpath);
char *fullfilemap = (char*)malloc(PATH_MAX);
snprintf(fullfilemap, PATH_MAX, "%s/%s", cache_dir, fileandblock);
fsll_makelink(bucketpath, "parent", fullfilemap);
FREE(fullfilemap);
// write mtime
char mtimepath[PATH_MAX];
snprintf(mtimepath, PATH_MAX, "%s/map%s/mtime", cache_dir, filename);
FILE *f = fopen(mtimepath, "w");
if (f == NULL) {
PERROR("opening mtime file in cache_add failed");
} else {
fprintf(f, "%llu\n", (unsigned long long) mtime);
fclose(f);
}
// finally, write data
char datapath[PATH_MAX];
snprintf(datapath, PATH_MAX, "%s/data", bucketpath);
int fd = open(datapath, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
if (fd == -1) {
PERROR("open in cache_add");
ERROR("\tcaused by open(%s, O_WRONLY|O_CREAT)\n", datapath);
errno = EIO;
pthread_mutex_unlock(&lock);
return -1;
}
ssize_t bytes_written = write(fd, buf, len);
if (bytes_written == -1) {
if (errno == ENOSPC) {
DEBUG("nothing written (no space on device)\n");
bytes_written = 0;
} else {
PERROR("write in cache_add");
errno = EIO;
close(fd);
pthread_mutex_unlock(&lock);
return -1;
}
}
DEBUG("%llu bytes written to cache\n",
(unsigned long long) bytes_written);
bool unchecked = is_unchecked(bucketpath);
if (!unchecked) {
cache_used_size += bytes_written;
}
// for some reason (filesystem metadata overhead?) this may need to loop a
// few times to write everything out.
while (bytes_written != len) {
DEBUG("not all bytes written to cache\n");
// Try again, more forcefully this time.
// Don't care if the FS says it has space, make some space anyway.
free_tail_bucket();
ssize_t more_bytes_written = write(fd, buf + bytes_written, len - bytes_written);
if (more_bytes_written == -1) {
if (errno == ENOSPC) {
// this is normal
DEBUG("nothing written (no space on device)\n");
more_bytes_written = 0;
} else {
PERROR("write error");
close(fd);
pthread_mutex_unlock(&lock);
return -EIO;
}
}
DEBUG("%llu more bytes written to cache (%llu total)\n",
(unsigned long long) more_bytes_written,
(unsigned long long) more_bytes_written + bytes_written);
if (!unchecked) {
cache_used_size += more_bytes_written;
}
bytes_written += more_bytes_written;
}
DEBUG("size now %llu bytes of %llu bytes (%lf%%)\n",
(unsigned long long) cache_used_size,
(unsigned long long) cache_size,
(double)100 * cache_used_size / cache_size
);
dump_queues();
close(fd);
pthread_mutex_unlock(&lock);
//###
FREE(bucketpath);
return 0;
}
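/*
 * Hypothetical sketch (not part of the original source) of the usual
 * cache-miss path: read one whole block from the backing file and hand it
 * to cache_add().  backing_fd, filename, block and mtime are assumed to
 * be supplied by the caller.
 */
#if 0
static int example_cache_miss(int backing_fd, const char *filename,
                              uint32_t block, time_t mtime)
{
    char *block_buf = malloc(bucket_max_size);
    if (block_buf == NULL)
        return -1;
    ssize_t got = pread(backing_fd, block_buf, bucket_max_size,
                        (off_t)((uint64_t) block * bucket_max_size));
    int rc = 0;
    if (got > 0)
        rc = cache_add(filename, block, block_buf, (uint64_t) got, mtime);
    free(block_buf);
    return rc;
}
#endif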
int cache_has_file_real(const char *filename, uint64_t *cached_byte_count, bool do_lock)
{
DEBUG("cache_has_file %s\n", filename);
int ret = 0;
bool locked = false;
char *mapdir = NULL;
char *data = NULL;
DIR *dir = NULL;
if (filename == NULL || cached_byte_count == NULL) {
errno = EINVAL;
ret = -1;
goto exit;
}
if (do_lock) {
pthread_mutex_lock(&lock);
locked = true;
}
// Look up the file in the map.
asprintf(&mapdir, "%s/map%s", cache_dir, filename);
dir = opendir(mapdir);
if (dir == NULL) {
if (errno == ENOENT) {
DEBUG("not in cache (%s)\n", mapdir);
ret = 0;
goto exit;
}
else {
PERROR("opendir");
ERROR("\topendir on %s\n", mapdir);
ret = -EIO;
goto exit;
}
}
// Check if it's a file or directory (see if it has an mtime file).
asprintf(&data, "%s/mtime", mapdir);
bool is_file = false;
struct stat mtime_stat = {0};
if (0 == stat(data, &mtime_stat)) {
        is_file = S_ISREG(mtime_stat.st_mode);
}
else if (errno != ENOENT) {
PERROR("stat");
ERROR("\tstat on %s\n", mapdir);
ret = -EIO;
goto exit;
}
// Loop over the sub-entries in the map.
struct dirent *dirent = NULL;
while ((dirent = readdir(dir)) != NULL) {
if (dirent->d_name[0] != '.' && strcmp(dirent->d_name, "mtime") != 0) {
FREE(data);
if (is_file) {
asprintf(&data, "%s/%s/data", mapdir, dirent->d_name);
struct stat statbuf = {0};
if (0 != stat(data, &statbuf)) {
PERROR("stat");
ERROR("\tstat on %s\n", data);
ret = -EIO;
goto exit;
}
                DEBUG("%llu bytes in %s\n",
                    (unsigned long long) statbuf.st_size, data);
*cached_byte_count += statbuf.st_size;
}
else {
asprintf(&data, "%s/%s", filename, dirent->d_name);
ret = cache_has_file_real(data, cached_byte_count, false /*don't lock*/);
if (ret != 0) {
break;
}
}
}
}
exit:
if (locked)
pthread_mutex_unlock(&lock);
FREE(mapdir);
FREE(data);
    if (dir)
        closedir(dir);
return ret;
}
int cache_has_file(const char *filename, uint64_t *cached_bytes)
{
*cached_bytes = 0;
return cache_has_file_real(filename, cached_bytes, true);
}
int cache_rename(const char *path, const char *path_new)
{
DEBUG("cache_rename %s\n\t%s\n", path, path_new);
int ret = 0;
char *mapdir = NULL;
char *mapdir_new = NULL;
DIR *dir = NULL;
char *parentlink = NULL;
if (path == NULL || path_new == NULL) {
errno = EINVAL;
ret = -1;
goto exit;
}
// Look up and rename the cache map dir.
asprintf(&mapdir, "%s/map%s", cache_dir, path);
asprintf(&mapdir_new, "%s/map%s", cache_dir, path_new);
ret = rename(mapdir, mapdir_new);
if (0 != ret) {
if (ENOENT == errno) {
DEBUG("not in cache: %s\n", path);
ret = 0;
}
else {
PERROR("rename");
ret = -EIO;
}
goto exit;
}
// Next, need to fix all the buckets' parent links.
dir = opendir(mapdir_new);
if (dir == NULL) {
PERROR("opendir");
ERROR("\topendir on %s\n", mapdir_new);
ret = -EIO;
goto exit;
}
size_t cch_parentlink = strlen(mapdir_new) + 20;
parentlink = (char*)malloc(cch_parentlink);
struct dirent *dirent = NULL;
while ((dirent = readdir(dir)) != NULL) {
if (dirent->d_name[0] != '.' && strcmp(dirent->d_name, "mtime") != 0) {
snprintf(parentlink, cch_parentlink, "%s/%s/parent",
mapdir_new, dirent->d_name);
ret = unlink(parentlink);
if (0 != ret) {
PERROR("unlink");
ERROR("\tunlink on %s\n", parentlink);
ret = -EIO;
goto exit;
}
ret = symlink(mapdir_new, parentlink);
if (0 != ret) {
PERROR("symlink");
ERROR("\tsymlink from %s\n\tto %s\n", parentlink, mapdir_new);
ret = -EIO;
goto exit;
}
}
}
exit:
FREE(mapdir);
FREE(mapdir_new);
FREE(parentlink);
    if (dir)
        closedir(dir);
return ret;
}
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
| ninoles/backfs | fscache.c | C | gpl-2.0 | 35,045 |
/*
* Intel 3000/3010 Memory Controller kernel module
* Copyright (C) 2007 Akamai Technologies, Inc.
* Shamelessly copied from:
* Intel D82875P Memory Controller kernel module
* (C) 2003 Linux Networx (http://lnxi.com)
*
* This file may be distributed under the terms of the
* GNU General Public License.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_core.h"
#define I3000_REVISION "1.1"
#define EDAC_MOD_STR "i3000_edac"
#define I3000_RANKS 8
#define I3000_RANKS_PER_CHANNEL 4
#define I3000_CHANNELS 2
/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */
#define I3000_MCHBAR 0x44 /* MCH Memory Mapped Register BAR */
#define I3000_MCHBAR_MASK 0xffffc000
#define I3000_MMR_WINDOW_SIZE 16384
#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
*
* 7:1 reserved
* 0 bit 32 of address
*/
#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
*
* 31:7 address
* 6:1 reserved
* 0 Error channel 0/1
*/
#define I3000_DEAP_GRAIN (1 << 7)
/*
* Helper functions to decode the DEAP/EDEAP hardware registers.
*
* The type promotion here is deliberate; we're deriving an
* unsigned long pfn and offset from hardware regs which are u8/u32.
*/
static inline unsigned long deap_pfn(u8 edeap, u32 deap)
{
deap >>= PAGE_SHIFT;
deap |= (edeap & 1) << (32 - PAGE_SHIFT);
return deap;
}
static inline unsigned long deap_offset(u32 deap)
{
return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
}
static inline int deap_channel(u32 deap)
{
return deap & 1;
}
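/*
 * Worked example of the helpers above (added for illustration; the
 * register values are hypothetical).  With PAGE_SHIFT == 12,
 * edeap == 0x01 and deap == 0x12345681:
 *
 *   deap_channel(deap)    == 1                      (bit 0)
 *   deap_offset(deap)     == 0x12345680 & 0xfff     == 0x680
 *                            (bits 6:1 are cleared by the 128-byte grain)
 *   deap_pfn(edeap, deap) == (0x12345681 >> 12) | (1 << 20) == 0x112345
 */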
#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
*
* 7:0 DRAM ECC Syndrome
*/
#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
*
* 15:12 reserved
* 11 MCH Thermal Sensor Event
* for SMI/SCI/SERR
* 10 reserved
* 9 LOCK to non-DRAM Memory Flag (LCKF)
* 8 Received Refresh Timeout Flag (RRTOF)
* 7:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
#define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */
#define I3000_ERRSTS_UE 0x0002
#define I3000_ERRSTS_CE 0x0001
#define I3000_ERRCMD 0xca /* Error Command (16b)
*
* 15:12 reserved
* 11 SERR on MCH Thermal Sensor Event
* (TSESERR)
* 10 reserved
* 9 SERR on LOCK to non-DRAM Memory
* (LCKERR)
* 8 SERR on DRAM Refresh Timeout
* (DRTOERR)
* 7:2 reserved
* 1 SERR Multi-Bit DRAM ECC Error
* (DMERR)
* 0 SERR on Single-Bit ECC Error
* (DSERR)
*/
/* Intel MMIO register space - device 0 function 0 - MMR space */
#define I3000_DRB_SHIFT 25 /* 32MiB grain */
#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
*
* 7:0 Channel 0 DRAM Rank Boundary Address
*/
#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
*
* 7:0 Channel 1 DRAM Rank Boundary Address
*/
#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
*
* 7 reserved
* 6:4 DRAM odd Rank Attribute
* 3 reserved
* 2:0 DRAM even Rank Attribute
*
* Each attribute defines the page
* size of the corresponding rank:
* 000: unpopulated
* 001: reserved
* 010: 4 KB
* 011: 8 KB
* 100: 16 KB
* Others: reserved
*/
#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
static inline unsigned char odd_rank_attrib(unsigned char dra)
{
return (dra & 0x70) >> 4;
}
static inline unsigned char even_rank_attrib(unsigned char dra)
{
return dra & 0x07;
}
#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
*
* 31:30 reserved
* 29 Initialization Complete (IC)
* 28:11 reserved
* 10:8 Refresh Mode Select (RMS)
* 7 reserved
* 6:4 Mode Select (SMS)
* 3:2 reserved
* 1:0 DRAM Type (DT)
*/
#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
*
* 31 Enhanced Addressing Enable (ENHADE)
* 30:0 reserved
*/
enum i3000p_chips {
I3000 = 0,
};
struct i3000_dev_info {
const char *ctl_name;
};
struct i3000_error_info {
u16 errsts;
u8 derrsyn;
u8 edeap;
u32 deap;
u16 errsts2;
};
static const struct i3000_dev_info i3000_devs[] = {
[I3000] = {
.ctl_name = "i3000"},
};
static struct pci_dev *mci_pdev;
static int i3000_registered = 1;
static struct edac_pci_ctl_info *i3000_pci;
static void i3000_get_error_info(struct mem_ctl_info *mci,
struct i3000_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->dev);
/*
* This is a mess because there is no atomic way to read all the
	 * registers at once, and a CE can be overwritten by a UE while we
	 * are reading them.
*/
pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
if (!(info->errsts & I3000_ERRSTS_BITS))
return;
pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);
/*
* If the error is the same for both reads then the first set
* of reads is valid. If there is a change then there is a CE
* with no info and the second set of reads is valid and
* should be UE info.
*/
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
}
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
I3000_ERRSTS_BITS);
}
static int i3000_process_error_info(struct mem_ctl_info *mci,
struct i3000_error_info *info,
int handle_errors)
{
int row, multi_chan, channel;
unsigned long pfn, offset;
multi_chan = mci->csrows[0].nr_channels - 1;
if (!(info->errsts & I3000_ERRSTS_BITS))
return 0;
if (!handle_errors)
return 1;
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
info->errsts = info->errsts2;
}
pfn = deap_pfn(info->edeap, info->deap);
offset = deap_offset(info->deap);
channel = deap_channel(info->deap);
row = edac_mc_find_csrow_by_page(mci, pfn);
if (info->errsts & I3000_ERRSTS_UE)
edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
else
edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
multi_chan ? channel : 0, "i3000 CE");
return 1;
}
static void i3000_check(struct mem_ctl_info *mci)
{
struct i3000_error_info info;
debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
i3000_get_error_info(mci, &info);
i3000_process_error_info(mci, &info, 1);
}
static int i3000_is_interleaved(const unsigned char *c0dra,
const unsigned char *c1dra,
const unsigned char *c0drb,
const unsigned char *c1drb)
{
int i;
/*
* If the channels aren't populated identically then
* we're not interleaved.
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
even_rank_attrib(c0dra[i]) !=
even_rank_attrib(c1dra[i]))
return 0;
/*
* If the rank boundaries for the two channels are different
* then we're not interleaved.
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
if (c0drb[i] != c1drb[i])
return 0;
return 1;
}
static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
int i;
struct mem_ctl_info *mci = NULL;
unsigned long last_cumul_size;
int interleaved, nr_channels;
unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
unsigned long mchbar;
void __iomem *window;
debugf0("MC: %s()\n", __func__);
	pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *)&mchbar);
mchbar &= I3000_MCHBAR_MASK;
window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
if (!window) {
printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
mchbar);
return -ENODEV;
}
c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
c1dra[1] = readb(window + I3000_C1DRA + 1); /* ranks 2,3 */
for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
c0drb[i] = readb(window + I3000_C0DRB + i);
c1drb[i] = readb(window + I3000_C1DRB + i);
}
iounmap(window);
/*
* Figure out how many channels we have.
*
* If we have what the datasheet calls "asymmetric channels"
* (essentially the same as what was called "virtual single
* channel mode" in the i82875) then it's a single channel as
* far as EDAC is concerned.
*/
interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
nr_channels = interleaved ? 2 : 1;
mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
if (!mci)
return -ENOMEM;
debugf3("MC: %s(): init mci\n", __func__);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I3000_REVISION;
mci->ctl_name = i3000_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i3000_check;
mci->ctl_page_to_phys = NULL;
/*
* The dram rank boundary (DRB) reg values are boundary addresses
* for each DRAM rank with a granularity of 32MB. DRB regs are
* cumulative; the last one will contain the total memory
* contained in all ranks.
*
* If we're in interleaved mode then we're only walking through
* the ranks of controller 0, so we double all the values we see.
*/
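	/*
	 * Worked example (illustrative values, not from the datasheet):
	 * with a 4 KiB PAGE_SIZE, I3000_DRB_SHIFT - PAGE_SHIFT == 13, so a
	 * DRB value of 0x10 (16 * 32 MiB == 512 MiB cumulative) yields
	 * 0x10 << 13 == 0x20000 pages, doubled to 0x40000 pages when the
	 * channels are interleaved.
	 */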
for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
u8 value;
u32 cumul_size;
struct csrow_info *csrow = &mci->csrows[i];
value = drb[i];
cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
if (interleaved)
cumul_size <<= 1;
debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
__func__, i, cumul_size);
if (cumul_size == last_cumul_size) {
csrow->mtype = MEM_EMPTY;
continue;
}
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
csrow->grain = I3000_DEAP_GRAIN;
csrow->mtype = MEM_DDR2;
csrow->dtype = DEV_UNKNOWN;
csrow->edac_mode = EDAC_UNKNOWN;
}
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
I3000_ERRSTS_BITS);
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
/* allocating generic PCI control info */
i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i3000_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
/* get this far and it's successful */
debugf3("MC: %s(): success\n", __func__);
return 0;
fail:
if (mci)
edac_mc_free(mci);
return rc;
}
/* returns count (>= 0), or negative on error */
static int __devinit i3000_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc;
debugf0("MC: %s()\n", __func__);
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = i3000_probe1(pdev, ent->driver_data);
if (!mci_pdev)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void __devexit i3000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
debugf0("%s()\n", __func__);
if (i3000_pci)
edac_pci_release_generic_ctl(i3000_pci);
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
edac_mc_free(mci);
}
static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
{
PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
I3000},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
static struct pci_driver i3000_driver = {
.name = EDAC_MOD_STR,
.probe = i3000_init_one,
.remove = __devexit_p(i3000_remove_one),
.id_table = i3000_pci_tbl,
};
static int __init i3000_init(void)
{
int pci_rc;
debugf3("MC: %s()\n", __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i3000_driver);
if (pci_rc < 0)
goto fail0;
if (!mci_pdev) {
i3000_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3000_HB, NULL);
if (!mci_pdev) {
debugf0("i3000 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
if (pci_rc < 0) {
debugf0("i3000 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&i3000_driver);
fail0:
if (mci_pdev)
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit i3000_exit(void)
{
debugf3("MC: %s()\n", __func__);
pci_unregister_driver(&i3000_driver);
if (!i3000_registered) {
i3000_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
}
}
module_init(i3000_init);
module_exit(i3000_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| jeffegg/beaglebone | drivers/edac/i3000_edac.c | C | gpl-2.0 | 14,324 |
/*****************************************************************************\
* $Id: fillfile.c 77 2006-02-15 01:00:42Z garlick $
*****************************************************************************
* Copyright (C) 2001-2008 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Jim Garlick <garlick@llnl.gov>.
* UCRL-CODE-2003-006.
*
* This file is part of Scrub, a program for erasing disks.
* For details, see https://code.google.com/p/diskscrub/
*
* Scrub is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* Scrub is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along
* with Scrub; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
\*****************************************************************************/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if HAVE_PTHREAD_H
#include <pthread.h>
#endif
#include <assert.h>
#include "util.h"
#include "fillfile.h"
static int no_threads = 0;
struct memstruct {
refill_t refill;
unsigned char *buf;
int size;
#if WITH_PTHREADS
pthread_t thd;
int err;
#endif
};
extern char *prog;
#if defined(O_DIRECT) && (defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN))
# define MY_O_DIRECT O_DIRECT
#else
# define MY_O_DIRECT 0
#endif
static void *
refill_thread(void *arg)
{
struct memstruct *mp = (struct memstruct *)arg;
mp->refill(mp->buf, mp->size);
return mp;
}
static int
refill_memcpy(struct memstruct *mp, unsigned char *mem, int memsize,
off_t filesize, off_t written)
{
#if WITH_PTHREADS
if (no_threads) {
mp->size = memsize;
refill_thread (mp);
} else {
if ((mp->err = pthread_join(mp->thd, NULL))) {
errno = mp->err;
goto error;
}
assert (memsize == mp->size);
}
#else
mp->size = memsize;
refill_thread (mp);
#endif
memcpy(mem, mp->buf, memsize);
#if WITH_PTHREADS
if (!no_threads) {
written += memsize;
if (filesize - written > 0) {
if (mp->size > filesize - written)
mp->size = filesize - written;
if ((mp->err = pthread_create(&mp->thd, NULL, refill_thread, mp))) {
errno = mp->err;
goto error;
}
}
}
#endif
return 0;
error:
return -1;
}
static int
refill_init(struct memstruct **mpp, refill_t refill, int memsize)
{
struct memstruct *mp = NULL;
if (!(mp = malloc(sizeof(struct memstruct))))
goto nomem;
if (!(mp->buf = malloc(memsize)))
goto nomem;
mp->size = memsize;
mp->refill = refill;
#if WITH_PTHREADS
if (!no_threads) {
if ((mp->err = pthread_create(&mp->thd, NULL, refill_thread, mp))) {
errno = mp->err;
goto error;
}
}
#endif
*mpp = mp;
return 0;
nomem:
errno = ENOMEM;
error:
return -1;
}
static void
refill_fini(struct memstruct *mp)
{
#if WITH_PTHREADS
if (!no_threads)
(void)pthread_join(mp->thd, NULL);
#endif
free (mp->buf);
free (mp);
}
/* Fill file (can be regular or special file) with pattern in mem.
* Writes will use memsize blocks.
* If 'refill' is non-null, call it before each write (for random fill).
* If 'progress' is non-null, call it after each write (for progress meter).
* If 'sparse' is true, only scrub first and last blocks (for testing).
* The number of bytes written is returned.
* If 'creat' is true, open with O_CREAT and allow ENOSPC to be non-fatal.
*/
off_t
fillfile(char *path, off_t filesize, unsigned char *mem, int memsize,
progress_t progress, void *arg, refill_t refill,
bool sparse, bool creat)
{
int fd = -1;
off_t n;
off_t written = 0LL;
int openflags = O_WRONLY;
struct memstruct *mp = NULL;
if (filetype(path) != FILE_CHAR)
openflags |= MY_O_DIRECT;
if (creat)
openflags |= O_CREAT;
fd = open(path, openflags, 0644);
if (fd < 0 && errno == EINVAL && openflags & MY_O_DIRECT) {
/* Try again without (MY_)O_DIRECT */
openflags &= ~MY_O_DIRECT;
fd = open(path, openflags, 0644);
}
if (fd < 0)
goto error;
do {
if (written + memsize > filesize)
memsize = filesize - written;
if (refill && !sparse) {
if (!mp)
if (refill_init(&mp, refill, memsize) < 0)
goto error;
if (refill_memcpy(mp, mem, memsize, filesize, written) < 0)
goto error;
}
if (sparse && !(written == 0) && !(written + memsize == filesize)) {
if (lseek(fd, memsize, SEEK_CUR) < 0)
goto error;
written += memsize;
} else {
n = write_all(fd, mem, memsize);
if (creat && n < 0 && errno == ENOSPC)
break;
if (n == 0) {
errno = EINVAL; /* write past end of device? */
goto error;
} else if (n < 0)
goto error;
written += n;
}
if (progress)
progress(arg, (double)written/filesize);
} while (written < filesize);
if (fsync(fd) < 0) {
if (errno != EINVAL)
goto error;
errno = 0;
}
#if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
/* Try to fool the kernel into dropping any device cache */
(void)posix_fadvise(fd, 0, filesize, POSIX_FADV_DONTNEED);
#endif
if (close(fd) < 0)
goto error;
if (mp)
refill_fini(mp);
return written;
error:
if (mp)
refill_fini(mp);
if (fd != -1)
(void)close(fd);
return (off_t)-1;
}
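/*
 * Hypothetical usage sketch (not part of the original source): one scrub
 * pass over a device with a fixed pattern and no progress callback.  The
 * path, filesize, buffer size and pattern value are arbitrary here.
 */
#if 0
static int example_scrub_pass(char *path, off_t filesize)
{
    int memsize = 1024 * 1024;                 /* arbitrary buffer size */
    unsigned char *mem = alloc_buffer(memsize);
    if (mem == NULL)
        return -1;
    memset(mem, 0xAA, memsize);                /* fixed fill pattern */
    off_t written = fillfile(path, filesize, mem, memsize,
                             NULL, NULL, NULL, false, false);
    free(mem);
    return (written == (off_t)-1) ? -1 : 0;
}
#endif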
/* Verify that file was filled with 'mem' patterns.
*/
off_t
checkfile(char *path, off_t filesize, unsigned char *mem, int memsize,
progress_t progress, void *arg, bool sparse)
{
int fd = -1;
off_t n;
off_t verified = 0LL;
unsigned char *buf = NULL;
int openflags = O_RDONLY;
if (!(buf = alloc_buffer(memsize)))
goto nomem;
if (filetype(path) != FILE_CHAR)
openflags |= MY_O_DIRECT;
fd = open(path, openflags);
if (fd < 0 && errno == EINVAL && openflags & MY_O_DIRECT) {
/* Try again without (MY_)O_DIRECT */
openflags &= ~MY_O_DIRECT;
fd = open(path, openflags);
}
if (fd < 0)
goto error;
do {
if (verified + memsize > filesize)
memsize = filesize - verified;
if (sparse && !(verified == 0) && !(verified + memsize == filesize)) {
if (lseek(fd, memsize, SEEK_CUR) < 0)
goto error;
verified += memsize;
} else {
n = read_all(fd, buf, memsize);
if (n < 0)
goto error;
if (n == 0) {
errno = EINVAL; /* early EOF */
goto error;
}
if (memcmp(mem, buf, memsize) != 0) {
break; /* return < filesize means verification failure */
}
verified += n;
}
if (progress)
progress(arg, (double)verified/filesize);
} while (verified < filesize);
if (close(fd) < 0)
goto error;
free(buf);
return verified;
nomem:
errno = ENOMEM;
error:
if (buf)
free (buf);
if (fd != -1)
(void)close(fd);
return (off_t)-1;
}
void
disable_threads(void)
{
no_threads = 1;
}
/*
* vi:tabstop=4 shiftwidth=4 expandtab
*/
| tjohansen14/diskscrub | src/fillfile.c | C | gpl-2.0 | 8,230 |
/* packet-laplink.c
* Routines for laplink dissection
* Copyright 2003, Brad Hards <bradh@frogmouth.net>
*
* $Id: packet-laplink.c 42632 2012-05-15 19:23:35Z wmeier $
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <glib.h>
#include <epan/packet.h>
#include <epan/strutil.h>
#include "packet-tcp.h"
#include <epan/prefs.h>
#define TCP_PORT_LAPLINK 1547
#define UDP_PORT_LAPLINK 1547
/* Initialize the protocol and registered fields */
static int proto_laplink = -1;
static int hf_laplink_udp_ident = -1;
static int hf_laplink_udp_name = -1;
static int hf_laplink_tcp_ident = -1;
static int hf_laplink_tcp_length = -1;
static int hf_laplink_tcp_data = -1;
/* Initialize the subtree pointers */
static gint ett_laplink = -1;
static const value_string laplink_udp_magic[] = {
{ 0x0f010000, "Name Solicitation" },
{ 0xf0000200, "Name Reply" },
{ 0, NULL }
};
static const value_string laplink_tcp_magic[] = {
{ 0xff08c000, "Unknown TCP query - connection?" },
{ 0xff08c200, "Unknown TCP query - connection?" },
{ 0xff0bc000, "Unknown TCP query - connection?" },
{ 0xff0bc200, "Unknown TCP query - connection?" },
{ 0xff10c000, "Unknown TCP response - connection?" },
{ 0xff10c200, "Unknown TCP response - connection?" },
{ 0xff11c000, "Unknown TCP query/response - directory list or file transfer?" },
{ 0xff11c200, "Unknown TCP query - directory list or file request?" },
{ 0xff13c000, "Unknown TCP response - connection?" },
{ 0xff13c200, "Unknown TCP response - connection?" },
{ 0xff14c000, "Unknown TCP response - directory list or file transfer?" },
{ 0, NULL }
};
static gboolean laplink_desegment = TRUE;
/* Code to actually dissect the packets - UDP */
static gint
dissect_laplink_udp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
int offset = 0;
proto_item *ti;
proto_tree *laplink_tree;
guint32 udp_ident;
const gchar *udp_ident_string;
/*
* Make sure the identifier is reasonable.
*/
if (!tvb_bytes_exist(tvb, offset, 4))
return 0; /* not enough bytes to check */
udp_ident = tvb_get_ntohl(tvb, offset);
udp_ident_string = match_strval(udp_ident, laplink_udp_magic);
if (udp_ident_string == NULL)
return 0; /* unknown */
/* Make entries in Protocol column and Info column on summary display */
col_set_str(pinfo->cinfo, COL_PROTOCOL, "Laplink");
if (check_col(pinfo->cinfo, COL_INFO))
col_add_str(pinfo->cinfo, COL_INFO, udp_ident_string);
if (tree){
ti = proto_tree_add_item(tree, proto_laplink, tvb, 0, -1, ENC_NA);
laplink_tree = proto_item_add_subtree(ti, ett_laplink);
proto_tree_add_uint(laplink_tree, hf_laplink_udp_ident, tvb, offset, 4, udp_ident);
offset += 4;
proto_tree_add_item(laplink_tree, hf_laplink_udp_name, tvb, offset, -1, ENC_ASCII|ENC_NA);
}
return tvb_length(tvb);
}
/* Code to actually dissect the packets - TCP aspects*/
static void
dissect_laplink_tcp_pdu(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
int offset = 0;
int length = 0;
proto_item *ti;
proto_tree *laplink_tree;
guint32 tcp_ident;
/* Make entries in Protocol column and Info column on summary display */
col_set_str(pinfo->cinfo, COL_PROTOCOL, "Laplink");
tcp_ident = tvb_get_ntohl(tvb, offset);
if (check_col(pinfo->cinfo, COL_INFO)) {
col_add_str(pinfo->cinfo, COL_INFO,
val_to_str(tcp_ident, laplink_tcp_magic, "TCP TBA (%u)"));
}
if (tree){
ti = proto_tree_add_item(tree, proto_laplink, tvb, 0, -1, ENC_NA);
laplink_tree = proto_item_add_subtree(ti, ett_laplink);
proto_tree_add_item(laplink_tree, hf_laplink_tcp_ident, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
length = tvb_get_ntohs(tvb, offset);
proto_tree_add_item(laplink_tree, hf_laplink_tcp_length, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
proto_tree_add_item(laplink_tree, hf_laplink_tcp_data, tvb, offset, length, ENC_NA);
/* Continue adding tree items to process the packet here */
}
/* If this protocol has a sub-dissector call it here, see section 1.8 */
}
static guint
get_laplink_pdu_len(packet_info *pinfo _U_, tvbuff_t *tvb, int offset)
{
guint plen;
/*
* The length doesn't include the length or ident fields; add those in.
*/
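	/* E.g. (illustrative): a length field of 256 yields a PDU length of
	   4 (ident) + 2 (length) + 256 (data) = 262 bytes. */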
plen = (tvb_get_ntohs(tvb, offset+4) + 2 + 4);
return plen;
}
static void
dissect_laplink_tcp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
tcp_dissect_pdus(tvb, pinfo, tree, laplink_desegment,
6, get_laplink_pdu_len,
dissect_laplink_tcp_pdu);
}
/* Register the protocol with Wireshark */
void
proto_register_laplink(void)
{
/* Setup list of header fields.  See Section 1.6.1 for details. */
static hf_register_info hf[] = {
{ &hf_laplink_udp_ident,
{ "UDP Ident", "laplink.udp_ident",
FT_UINT32, BASE_HEX, VALS(laplink_udp_magic), 0x0,
"Unknown magic", HFILL }
},
{ &hf_laplink_udp_name,
{ "UDP Name", "laplink.udp_name",
FT_STRINGZ, BASE_NONE, NULL, 0x0,
"Machine name", HFILL }
},
{ &hf_laplink_tcp_ident,
{ "TCP Ident", "laplink.tcp_ident",
FT_UINT32, BASE_HEX, VALS(laplink_tcp_magic), 0x0,
"Unknown magic", HFILL }
},
{ &hf_laplink_tcp_length,
{ "TCP Data payload length", "laplink.tcp_length",
FT_UINT16, BASE_DEC, NULL, 0x0,
"Length of remaining payload", HFILL }
},
{ &hf_laplink_tcp_data,
{ "Unknown TCP data", "laplink.tcp_data",
FT_BYTES, BASE_NONE, NULL, 0x0,
"TCP data", HFILL }
},
};
/* Setup protocol subtree array */
static gint *ett[] = {
&ett_laplink,
};
module_t *laplink_module;
/* Register the protocol name and description */
proto_laplink = proto_register_protocol("Laplink",
"Laplink", "laplink");
/* Required function calls to register the header fields and subtrees used */
proto_register_field_array(proto_laplink, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
laplink_module = prefs_register_protocol(proto_laplink, NULL);
prefs_register_bool_preference(laplink_module, "desegment_laplink_over_tcp",
"Reassemble Laplink over TCP messages spanning multiple TCP segments",
"Whether the Laplink dissector should reassemble messages spanning multiple TCP segments."
" To use this option, you must also enable \"Allow subdissectors to reassemble TCP streams\" in the TCP protocol settings.",
&laplink_desegment);
}
/* If this dissector uses sub-dissector registration add a registration routine.
This format is required because a script is used to find these routines and
create the code that calls these routines.
*/
void
proto_reg_handoff_laplink(void)
{
dissector_handle_t laplink_udp_handle;
dissector_handle_t laplink_tcp_handle;
laplink_tcp_handle = create_dissector_handle(dissect_laplink_tcp,
proto_laplink);
dissector_add_uint("tcp.port", TCP_PORT_LAPLINK, laplink_tcp_handle);
laplink_udp_handle = new_create_dissector_handle(dissect_laplink_udp,
proto_laplink);
dissector_add_uint("udp.port", UDP_PORT_LAPLINK, laplink_udp_handle);
}
| Abhi9k/wireshark-dissector | epan/dissectors/packet-laplink.c | C | gpl-2.0 | 7,776 |
/* Discovery of auto-inc and auto-dec instructions.
Copyright (C) 2006-2015 Free Software Foundation, Inc.
Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "flags.h"
#include "except.h"
#include "diagnostic-core.h"
#include "recog.h"
#include "expr.h"
#include "tree-pass.h"
#include "df.h"
#include "dbgcnt.h"
#include "target.h"
/* This pass was originally removed from flow.c.  However, there is
almost nothing that remains of that code.
There are (4) basic forms that are matched:
(1) FORM_PRE_ADD
a <- b + c
...
*a
becomes
a <- b
...
*(a += c) pre
(2) FORM_PRE_INC
a += c
...
*a
becomes
*(a += c) pre
(3) FORM_POST_ADD
*a
...
b <- a + c
(For this case to be true, b must not be assigned or used between
the *a and the assignment to b. B must also be a Pmode reg.)
becomes
b <- a
...
*(b += c) post
(4) FORM_POST_INC
*a
...
a <- a + c
becomes
*(a += c) post
There are three types of values of c.
1) c is a constant equal to the width of the value being accessed by
the pointer. This is useful for machines that have
HAVE_PRE_INCREMENT, HAVE_POST_INCREMENT, HAVE_PRE_DECREMENT or
HAVE_POST_DECREMENT defined.
2) c is a constant not equal to the width of the value being accessed
by the pointer. This is useful for machines that have
HAVE_PRE_MODIFY_DISP, HAVE_POST_MODIFY_DISP defined.
3) c is a register. This is useful for machines that have
HAVE_PRE_MODIFY_REG, HAVE_POST_MODIFY_REG
   There is one special case: if a already had an offset equal to +- its
   width, and that offset is equal to -c when the increment was before
   the ref or +c if the increment was after the ref, then we can do the
   combination but switch the pre/post bit.  */
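/* As an illustrative example with hypothetical registers and a 4-byte
   access, FORM_POST_INC on a target with HAVE_POST_INCREMENT turns

       *r2
       ...
       r2 <- r2 + 4

   into

       *(r2 += 4) post

   and the add/inc insn itself is deleted (see attempt_change).  */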
#ifdef AUTO_INC_DEC
enum form
{
FORM_PRE_ADD,
FORM_PRE_INC,
FORM_POST_ADD,
FORM_POST_INC,
FORM_last
};
/* The states of the second operands of mem refs and inc insns. If no
second operand of the mem_ref was found, it is assumed to just be
ZERO. SIZE is the size of the mode accessed in the memref. The
ANY is used for constants that are not +-size or 0. REG is used if
the forms are reg1 + reg2. */
enum inc_state
{
INC_ZERO, /* == 0 */
  INC_NEG_SIZE,           /* == -size  */
  INC_POS_SIZE,           /* == +size */
INC_NEG_ANY, /* == some -constant */
INC_POS_ANY, /* == some +constant */
INC_REG, /* == some register */
INC_last
};
/* The eight forms that pre/post inc/dec can take. */
enum gen_form
{
NOTHING,
SIMPLE_PRE_INC, /* ++size */
SIMPLE_POST_INC, /* size++ */
SIMPLE_PRE_DEC, /* --size */
SIMPLE_POST_DEC, /* size-- */
DISP_PRE, /* ++con */
DISP_POST, /* con++ */
REG_PRE, /* ++reg */
REG_POST /* reg++ */
};
/* Tmp mem rtx for use in cost modeling. */
static rtx mem_tmp;
static enum inc_state
set_inc_state (HOST_WIDE_INT val, int size)
{
if (val == 0)
return INC_ZERO;
if (val < 0)
return (val == -size) ? INC_NEG_SIZE : INC_NEG_ANY;
else
return (val == size) ? INC_POS_SIZE : INC_POS_ANY;
}
/* The DECISION_TABLE that describes what form, if any, the increment
or decrement will take. It is a three dimensional table. The first
index is the type of constant or register found as the second
operand of the inc insn. The second index is the type of constant
or register found as the second operand of the memory reference (if
no second operand exists, 0 is used). The third index is the form
and location (relative to the mem reference) of inc insn. */
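/* For example (illustrative values): an inc insn "r0 <- r0 + 4" that
   appears before a 4-byte access *r0 (FORM_PRE_INC, second mem operand
   INC_ZERO) indexes decision_table[INC_POS_SIZE][INC_ZERO][FORM_PRE_INC],
   which init_decision_table sets to SIMPLE_PRE_INC when
   HAVE_PRE_INCREMENT is available.  */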
static bool initialized = false;
static enum gen_form decision_table[INC_last][INC_last][FORM_last];
static void
init_decision_table (void)
{
enum gen_form value;
if (HAVE_PRE_INCREMENT || HAVE_PRE_MODIFY_DISP)
{
/* Prefer the simple form if both are available. */
value = (HAVE_PRE_INCREMENT) ? SIMPLE_PRE_INC : DISP_PRE;
decision_table[INC_POS_SIZE][INC_ZERO][FORM_PRE_ADD] = value;
decision_table[INC_POS_SIZE][INC_ZERO][FORM_PRE_INC] = value;
decision_table[INC_POS_SIZE][INC_POS_SIZE][FORM_POST_ADD] = value;
decision_table[INC_POS_SIZE][INC_POS_SIZE][FORM_POST_INC] = value;
}
if (HAVE_POST_INCREMENT || HAVE_POST_MODIFY_DISP)
{
/* Prefer the simple form if both are available. */
value = (HAVE_POST_INCREMENT) ? SIMPLE_POST_INC : DISP_POST;
decision_table[INC_POS_SIZE][INC_ZERO][FORM_POST_ADD] = value;
decision_table[INC_POS_SIZE][INC_ZERO][FORM_POST_INC] = value;
decision_table[INC_POS_SIZE][INC_NEG_SIZE][FORM_PRE_ADD] = value;
decision_table[INC_POS_SIZE][INC_NEG_SIZE][FORM_PRE_INC] = value;
}
if (HAVE_PRE_DECREMENT || HAVE_PRE_MODIFY_DISP)
{
/* Prefer the simple form if both are available. */
value = (HAVE_PRE_DECREMENT) ? SIMPLE_PRE_DEC : DISP_PRE;
decision_table[INC_NEG_SIZE][INC_ZERO][FORM_PRE_ADD] = value;
decision_table[INC_NEG_SIZE][INC_ZERO][FORM_PRE_INC] = value;
decision_table[INC_NEG_SIZE][INC_NEG_SIZE][FORM_POST_ADD] = value;
decision_table[INC_NEG_SIZE][INC_NEG_SIZE][FORM_POST_INC] = value;
}
if (HAVE_POST_DECREMENT || HAVE_POST_MODIFY_DISP)
{
/* Prefer the simple form if both are available. */
value = (HAVE_POST_DECREMENT) ? SIMPLE_POST_DEC : DISP_POST;
decision_table[INC_NEG_SIZE][INC_ZERO][FORM_POST_ADD] = value;
decision_table[INC_NEG_SIZE][INC_ZERO][FORM_POST_INC] = value;
decision_table[INC_NEG_SIZE][INC_POS_SIZE][FORM_PRE_ADD] = value;
decision_table[INC_NEG_SIZE][INC_POS_SIZE][FORM_PRE_INC] = value;
}
if (HAVE_PRE_MODIFY_DISP)
{
decision_table[INC_POS_ANY][INC_ZERO][FORM_PRE_ADD] = DISP_PRE;
decision_table[INC_POS_ANY][INC_ZERO][FORM_PRE_INC] = DISP_PRE;
decision_table[INC_POS_ANY][INC_POS_ANY][FORM_POST_ADD] = DISP_PRE;
decision_table[INC_POS_ANY][INC_POS_ANY][FORM_POST_INC] = DISP_PRE;
decision_table[INC_NEG_ANY][INC_ZERO][FORM_PRE_ADD] = DISP_PRE;
decision_table[INC_NEG_ANY][INC_ZERO][FORM_PRE_INC] = DISP_PRE;
decision_table[INC_NEG_ANY][INC_NEG_ANY][FORM_POST_ADD] = DISP_PRE;
decision_table[INC_NEG_ANY][INC_NEG_ANY][FORM_POST_INC] = DISP_PRE;
}
if (HAVE_POST_MODIFY_DISP)
{
decision_table[INC_POS_ANY][INC_ZERO][FORM_POST_ADD] = DISP_POST;
decision_table[INC_POS_ANY][INC_ZERO][FORM_POST_INC] = DISP_POST;
decision_table[INC_POS_ANY][INC_NEG_ANY][FORM_PRE_ADD] = DISP_POST;
decision_table[INC_POS_ANY][INC_NEG_ANY][FORM_PRE_INC] = DISP_POST;
decision_table[INC_NEG_ANY][INC_ZERO][FORM_POST_ADD] = DISP_POST;
decision_table[INC_NEG_ANY][INC_ZERO][FORM_POST_INC] = DISP_POST;
decision_table[INC_NEG_ANY][INC_POS_ANY][FORM_PRE_ADD] = DISP_POST;
decision_table[INC_NEG_ANY][INC_POS_ANY][FORM_PRE_INC] = DISP_POST;
}
/* This is much simpler than the other cases because we do not look
     for the reg1-reg2 case.  Note that we do not have INC_POS_REG
     and INC_NEG_REG states.  Most of the use of such states would be
on a target that had an R1 - R2 update address form.
There is the remote possibility that you could also catch a = a +
b; *(a - b) as a postdecrement of (a + b). However, it is
unclear if *(a - b) would ever be generated on a machine that did
not have that kind of addressing mode. The IA-64 and RS6000 will
not do this, and I cannot speak for any other. If any
     architecture does have an a-b update form, these cases should be
added. */
if (HAVE_PRE_MODIFY_REG)
{
decision_table[INC_REG][INC_ZERO][FORM_PRE_ADD] = REG_PRE;
decision_table[INC_REG][INC_ZERO][FORM_PRE_INC] = REG_PRE;
decision_table[INC_REG][INC_REG][FORM_POST_ADD] = REG_PRE;
decision_table[INC_REG][INC_REG][FORM_POST_INC] = REG_PRE;
}
if (HAVE_POST_MODIFY_REG)
{
decision_table[INC_REG][INC_ZERO][FORM_POST_ADD] = REG_POST;
decision_table[INC_REG][INC_ZERO][FORM_POST_INC] = REG_POST;
}
initialized = true;
}
/* Parsed fields of an inc insn of the form "reg_res = reg0+reg1" or
"reg_res = reg0+c". */
static struct inc_insn
{
rtx_insn *insn; /* The insn being parsed. */
rtx pat; /* The pattern of the insn. */
bool reg1_is_const; /* True if reg1 is const, false if reg1 is a reg. */
enum form form;
rtx reg_res;
rtx reg0;
rtx reg1;
enum inc_state reg1_state;/* The form of the const if reg1 is a const. */
HOST_WIDE_INT reg1_val;/* Value if reg1 is const. */
} inc_insn;
/* Dump the parsed inc insn to FILE. */
static void
dump_inc_insn (FILE *file)
{
const char *f = ((inc_insn.form == FORM_PRE_ADD)
|| (inc_insn.form == FORM_PRE_INC)) ? "pre" : "post";
dump_insn_slim (file, inc_insn.insn);
switch (inc_insn.form)
{
case FORM_PRE_ADD:
case FORM_POST_ADD:
if (inc_insn.reg1_is_const)
fprintf (file, "found %s add(%d) r[%d]=r[%d]+%d\n",
f, INSN_UID (inc_insn.insn),
REGNO (inc_insn.reg_res),
REGNO (inc_insn.reg0), (int) inc_insn.reg1_val);
else
fprintf (file, "found %s add(%d) r[%d]=r[%d]+r[%d]\n",
f, INSN_UID (inc_insn.insn),
REGNO (inc_insn.reg_res),
REGNO (inc_insn.reg0), REGNO (inc_insn.reg1));
break;
case FORM_PRE_INC:
case FORM_POST_INC:
if (inc_insn.reg1_is_const)
fprintf (file, "found %s inc(%d) r[%d]+=%d\n",
f, INSN_UID (inc_insn.insn),
REGNO (inc_insn.reg_res), (int) inc_insn.reg1_val);
else
fprintf (file, "found %s inc(%d) r[%d]+=r[%d]\n",
f, INSN_UID (inc_insn.insn),
REGNO (inc_insn.reg_res), REGNO (inc_insn.reg1));
break;
default:
break;
}
}
/* Parsed fields of a mem ref of the form "*(reg0+reg1)" or "*(reg0+c)". */
static struct mem_insn
{
rtx_insn *insn; /* The insn being parsed. */
rtx pat; /* The pattern of the insn. */
rtx *mem_loc; /* The address of the field that holds the mem */
/* that is to be replaced. */
bool reg1_is_const; /* True if reg1 is const, false if reg1 is a reg. */
rtx reg0;
rtx reg1; /* This is either a reg or a const depending on
reg1_is_const. */
enum inc_state reg1_state;/* The form of the const if reg1 is a const. */
HOST_WIDE_INT reg1_val;/* Value if reg1 is const. */
} mem_insn;
/* Dump the parsed mem insn to FILE. */
static void
dump_mem_insn (FILE *file)
{
dump_insn_slim (file, mem_insn.insn);
if (mem_insn.reg1_is_const)
fprintf (file, "found mem(%d) *(r[%d]+%d)\n",
INSN_UID (mem_insn.insn),
REGNO (mem_insn.reg0), (int) mem_insn.reg1_val);
else
fprintf (file, "found mem(%d) *(r[%d]+r[%d])\n",
INSN_UID (mem_insn.insn),
REGNO (mem_insn.reg0), REGNO (mem_insn.reg1));
}
/* The following three arrays contain pointers to instructions. They
are indexed by REGNO. At any point in the basic block where we are
looking these three arrays contain, respectively, the next insn
that uses REGNO, the next inc or add insn that uses REGNO and the
next insn that sets REGNO.
The arrays are not cleared when we move from block to block so
   whenever an insn is retrieved from these arrays, its block number
must be compared with the current block.
*/
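/* merge_in_block walks each basic block in reverse order
   (FOR_BB_INSNS_REVERSE_SAFE), so at the insn currently being examined
   each entry refers to the closest following insn of that kind within
   the block, if any.  */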
static rtx_insn **reg_next_use = NULL;
static rtx_insn **reg_next_inc_use = NULL;
static rtx_insn **reg_next_def = NULL;
/* Move dead notes that match PATTERN to TO_INSN from FROM_INSN.  We do
not really care about moving any other notes from the inc or add
insn. Moving the REG_EQUAL and REG_EQUIV is clearly wrong and it
does not appear that there are any other kinds of relevant notes. */
static void
move_dead_notes (rtx_insn *to_insn, rtx_insn *from_insn, rtx pattern)
{
rtx note;
rtx next_note;
rtx prev_note = NULL;
for (note = REG_NOTES (from_insn); note; note = next_note)
{
next_note = XEXP (note, 1);
if ((REG_NOTE_KIND (note) == REG_DEAD)
&& pattern == XEXP (note, 0))
{
XEXP (note, 1) = REG_NOTES (to_insn);
REG_NOTES (to_insn) = note;
if (prev_note)
XEXP (prev_note, 1) = next_note;
else
REG_NOTES (from_insn) = next_note;
}
else prev_note = note;
}
}
/* Create a mov insn DEST_REG <- SRC_REG and insert it before
NEXT_INSN. */
static rtx_insn *
insert_move_insn_before (rtx_insn *next_insn, rtx dest_reg, rtx src_reg)
{
rtx_insn *insns;
start_sequence ();
emit_move_insn (dest_reg, src_reg);
insns = get_insns ();
end_sequence ();
emit_insn_before (insns, next_insn);
return insns;
}
/* Change mem_insn.mem_loc so that it uses NEW_ADDR, which has an
   increment of INC_REG.  To have reached this point, the change is a
   legitimate one from a dataflow point of view.  The only questions
   are whether this is a valid change to the instruction and whether
   it is a profitable change to the instruction.  */
static bool
attempt_change (rtx new_addr, rtx inc_reg)
{
/* There are four cases: For the two cases that involve an add
instruction, we are going to have to delete the add and insert a
mov. We are going to assume that the mov is free. This is
fairly early in the backend and there are a lot of opportunities
     for removing that move later.  In particular, there is the case
     where the move may be dead; this is what dead code elimination
     passes are for.  The two cases where we have an inc insn will be
     handled without a mov.  */
basic_block bb = BLOCK_FOR_INSN (mem_insn.insn);
rtx_insn *mov_insn = NULL;
int regno;
rtx mem = *mem_insn.mem_loc;
machine_mode mode = GET_MODE (mem);
rtx new_mem;
int old_cost = 0;
int new_cost = 0;
bool speed = optimize_bb_for_speed_p (bb);
PUT_MODE (mem_tmp, mode);
XEXP (mem_tmp, 0) = new_addr;
old_cost = (set_src_cost (mem, speed)
+ set_rtx_cost (PATTERN (inc_insn.insn), speed));
new_cost = set_src_cost (mem_tmp, speed);
/* The first item of business is to see if this is profitable. */
if (old_cost < new_cost)
{
if (dump_file)
fprintf (dump_file, "cost failure old=%d new=%d\n", old_cost, new_cost);
return false;
}
/* Jump through a lot of hoops to keep the attributes up to date. We
do not want to call one of the change address variants that take
an offset even though we know the offset in many cases. These
assume you are changing where the address is pointing by the
offset. */
new_mem = replace_equiv_address_nv (mem, new_addr);
if (! validate_change (mem_insn.insn, mem_insn.mem_loc, new_mem, 0))
{
if (dump_file)
fprintf (dump_file, "validation failure\n");
return false;
}
/* From here to the end of the function we are committed to the
change, i.e. nothing fails. Generate any necessary movs, move
any regnotes, and fix up the reg_next_{use,inc_use,def}. */
switch (inc_insn.form)
{
case FORM_PRE_ADD:
/* Replace the addition with a move. Do it at the location of
the addition since the operand of the addition may change
before the memory reference. */
mov_insn = insert_move_insn_before (inc_insn.insn,
inc_insn.reg_res, inc_insn.reg0);
move_dead_notes (mov_insn, inc_insn.insn, inc_insn.reg0);
regno = REGNO (inc_insn.reg_res);
reg_next_def[regno] = mov_insn;
reg_next_use[regno] = NULL;
regno = REGNO (inc_insn.reg0);
reg_next_use[regno] = mov_insn;
df_recompute_luids (bb);
break;
case FORM_POST_INC:
regno = REGNO (inc_insn.reg_res);
if (reg_next_use[regno] == reg_next_inc_use[regno])
reg_next_inc_use[regno] = NULL;
/* Fallthru. */
case FORM_PRE_INC:
regno = REGNO (inc_insn.reg_res);
reg_next_def[regno] = mem_insn.insn;
reg_next_use[regno] = NULL;
break;
case FORM_POST_ADD:
mov_insn = insert_move_insn_before (mem_insn.insn,
inc_insn.reg_res, inc_insn.reg0);
move_dead_notes (mov_insn, inc_insn.insn, inc_insn.reg0);
/* Do not move anything to the mov insn because the instruction
pointer for the main iteration has not yet hit that. It is
still pointing to the mem insn. */
regno = REGNO (inc_insn.reg_res);
reg_next_def[regno] = mem_insn.insn;
reg_next_use[regno] = NULL;
regno = REGNO (inc_insn.reg0);
reg_next_use[regno] = mem_insn.insn;
if ((reg_next_use[regno] == reg_next_inc_use[regno])
|| (reg_next_inc_use[regno] == inc_insn.insn))
reg_next_inc_use[regno] = NULL;
df_recompute_luids (bb);
break;
case FORM_last:
default:
gcc_unreachable ();
}
if (!inc_insn.reg1_is_const)
{
regno = REGNO (inc_insn.reg1);
reg_next_use[regno] = mem_insn.insn;
if ((reg_next_use[regno] == reg_next_inc_use[regno])
|| (reg_next_inc_use[regno] == inc_insn.insn))
reg_next_inc_use[regno] = NULL;
}
delete_insn (inc_insn.insn);
if (dump_file && mov_insn)
{
fprintf (dump_file, "inserting mov ");
dump_insn_slim (dump_file, mov_insn);
}
/* Record that this insn has an implicit side effect. */
add_reg_note (mem_insn.insn, REG_INC, inc_reg);
if (dump_file)
{
fprintf (dump_file, "****success ");
dump_insn_slim (dump_file, mem_insn.insn);
}
return true;
}
/* Try to combine the instruction in INC_INSN with the instruction in
MEM_INSN. First the form is determined using the DECISION_TABLE
and the results of parsing the INC_INSN and the MEM_INSN.
Assuming the form is ok, a prototype new address is built which is
passed to ATTEMPT_CHANGE for final processing. */
static bool
try_merge (void)
{
enum gen_form gen_form;
rtx mem = *mem_insn.mem_loc;
rtx inc_reg = inc_insn.form == FORM_POST_ADD ?
inc_insn.reg_res : mem_insn.reg0;
/* The width of the mem being accessed. */
int size = GET_MODE_SIZE (GET_MODE (mem));
rtx_insn *last_insn = NULL;
machine_mode reg_mode = GET_MODE (inc_reg);
switch (inc_insn.form)
{
case FORM_PRE_ADD:
case FORM_PRE_INC:
last_insn = mem_insn.insn;
break;
case FORM_POST_INC:
case FORM_POST_ADD:
last_insn = inc_insn.insn;
break;
case FORM_last:
default:
gcc_unreachable ();
}
/* Cannot handle auto inc of the stack. */
if (inc_reg == stack_pointer_rtx)
{
if (dump_file)
fprintf (dump_file, "cannot inc stack %d failure\n", REGNO (inc_reg));
return false;
}
/* Look to see if the inc register is dead after the memory
reference. If it is, do not do the combination. */
if (find_regno_note (last_insn, REG_DEAD, REGNO (inc_reg)))
{
if (dump_file)
fprintf (dump_file, "dead failure %d\n", REGNO (inc_reg));
return false;
}
mem_insn.reg1_state = (mem_insn.reg1_is_const)
? set_inc_state (mem_insn.reg1_val, size) : INC_REG;
inc_insn.reg1_state = (inc_insn.reg1_is_const)
? set_inc_state (inc_insn.reg1_val, size) : INC_REG;
/* Now get the form that we are generating. */
gen_form = decision_table
[inc_insn.reg1_state][mem_insn.reg1_state][inc_insn.form];
if (dbg_cnt (auto_inc_dec) == false)
return false;
switch (gen_form)
{
default:
case NOTHING:
return false;
case SIMPLE_PRE_INC: /* ++size */
if (dump_file)
fprintf (dump_file, "trying SIMPLE_PRE_INC\n");
return attempt_change (gen_rtx_PRE_INC (reg_mode, inc_reg), inc_reg);
break;
case SIMPLE_POST_INC: /* size++ */
if (dump_file)
fprintf (dump_file, "trying SIMPLE_POST_INC\n");
return attempt_change (gen_rtx_POST_INC (reg_mode, inc_reg), inc_reg);
break;
case SIMPLE_PRE_DEC: /* --size */
if (dump_file)
fprintf (dump_file, "trying SIMPLE_PRE_DEC\n");
return attempt_change (gen_rtx_PRE_DEC (reg_mode, inc_reg), inc_reg);
break;
case SIMPLE_POST_DEC: /* size-- */
if (dump_file)
fprintf (dump_file, "trying SIMPLE_POST_DEC\n");
return attempt_change (gen_rtx_POST_DEC (reg_mode, inc_reg), inc_reg);
break;
case DISP_PRE: /* ++con */
if (dump_file)
fprintf (dump_file, "trying DISP_PRE\n");
return attempt_change (gen_rtx_PRE_MODIFY (reg_mode,
inc_reg,
gen_rtx_PLUS (reg_mode,
inc_reg,
inc_insn.reg1)),
inc_reg);
break;
case DISP_POST: /* con++ */
if (dump_file)
fprintf (dump_file, "trying POST_DISP\n");
return attempt_change (gen_rtx_POST_MODIFY (reg_mode,
inc_reg,
gen_rtx_PLUS (reg_mode,
inc_reg,
inc_insn.reg1)),
inc_reg);
break;
case REG_PRE: /* ++reg */
if (dump_file)
fprintf (dump_file, "trying PRE_REG\n");
return attempt_change (gen_rtx_PRE_MODIFY (reg_mode,
inc_reg,
gen_rtx_PLUS (reg_mode,
inc_reg,
inc_insn.reg1)),
inc_reg);
break;
case REG_POST: /* reg++ */
if (dump_file)
fprintf (dump_file, "trying POST_REG\n");
return attempt_change (gen_rtx_POST_MODIFY (reg_mode,
inc_reg,
gen_rtx_PLUS (reg_mode,
inc_reg,
inc_insn.reg1)),
inc_reg);
break;
}
}
/* Return the next insn that uses (if reg_next_use is passed in
NEXT_ARRAY) or defines (if reg_next_def is passed in NEXT_ARRAY)
REGNO in BB. */
static rtx_insn *
get_next_ref (int regno, basic_block bb, rtx_insn **next_array)
{
rtx_insn *insn = next_array[regno];
/* Lazy about cleaning out the next_arrays. */
if (insn && BLOCK_FOR_INSN (insn) != bb)
{
next_array[regno] = NULL;
insn = NULL;
}
return insn;
}
/* Reverse the operands in a mem insn. */
static void
reverse_mem (void)
{
rtx tmp = mem_insn.reg1;
mem_insn.reg1 = mem_insn.reg0;
mem_insn.reg0 = tmp;
}
/* Reverse the operands in an inc insn.  */
static void
reverse_inc (void)
{
rtx tmp = inc_insn.reg1;
inc_insn.reg1 = inc_insn.reg0;
inc_insn.reg0 = tmp;
}
/* Return true if INSN is of a form "a = b op c" where a and b are
regs. op is + if c is a reg and +|- if c is a const. Fill in
INC_INSN with what is found.
   This function is called in two contexts: if BEFORE_MEM is true,
   it is called for each insn in the basic block.  If BEFORE_MEM is
false, it is called for the instruction in the block that uses the
index register for some memory reference that is currently being
processed. */
static bool
parse_add_or_inc (rtx_insn *insn, bool before_mem)
{
rtx pat = single_set (insn);
if (!pat)
return false;
/* Result must be single reg. */
if (!REG_P (SET_DEST (pat)))
return false;
if ((GET_CODE (SET_SRC (pat)) != PLUS)
&& (GET_CODE (SET_SRC (pat)) != MINUS))
return false;
if (!REG_P (XEXP (SET_SRC (pat), 0)))
return false;
inc_insn.insn = insn;
inc_insn.pat = pat;
inc_insn.reg_res = SET_DEST (pat);
inc_insn.reg0 = XEXP (SET_SRC (pat), 0);
if (rtx_equal_p (inc_insn.reg_res, inc_insn.reg0))
inc_insn.form = before_mem ? FORM_PRE_INC : FORM_POST_INC;
else
inc_insn.form = before_mem ? FORM_PRE_ADD : FORM_POST_ADD;
if (CONST_INT_P (XEXP (SET_SRC (pat), 1)))
{
/* Process a = b + c where c is a const. */
inc_insn.reg1_is_const = true;
if (GET_CODE (SET_SRC (pat)) == PLUS)
{
inc_insn.reg1 = XEXP (SET_SRC (pat), 1);
inc_insn.reg1_val = INTVAL (inc_insn.reg1);
}
else
{
inc_insn.reg1_val = -INTVAL (XEXP (SET_SRC (pat), 1));
inc_insn.reg1 = GEN_INT (inc_insn.reg1_val);
}
return true;
}
else if ((HAVE_PRE_MODIFY_REG || HAVE_POST_MODIFY_REG)
&& (REG_P (XEXP (SET_SRC (pat), 1)))
&& GET_CODE (SET_SRC (pat)) == PLUS)
{
/* Process a = b + c where c is a reg. */
inc_insn.reg1 = XEXP (SET_SRC (pat), 1);
inc_insn.reg1_is_const = false;
if (inc_insn.form == FORM_PRE_INC
|| inc_insn.form == FORM_POST_INC)
return true;
else if (rtx_equal_p (inc_insn.reg_res, inc_insn.reg1))
{
/* Reverse the two operands and turn *_ADD into *_INC since
a = c + a. */
reverse_inc ();
inc_insn.form = before_mem ? FORM_PRE_INC : FORM_POST_INC;
return true;
}
else
return true;
}
return false;
}
/* A recursive function that checks all of the mem uses in
ADDRESS_OF_X to see if any single one of them is compatible with
what has been found in inc_insn.
-1 is returned for success. 0 is returned if nothing was found and
1 is returned for failure. */
static int
find_address (rtx *address_of_x)
{
rtx x = *address_of_x;
enum rtx_code code = GET_CODE (x);
const char *const fmt = GET_RTX_FORMAT (code);
int i;
int value = 0;
int tem;
if (code == MEM && rtx_equal_p (XEXP (x, 0), inc_insn.reg_res))
{
/* Match with *reg0. */
mem_insn.mem_loc = address_of_x;
mem_insn.reg0 = inc_insn.reg_res;
mem_insn.reg1_is_const = true;
mem_insn.reg1_val = 0;
mem_insn.reg1 = GEN_INT (0);
return -1;
}
if (code == MEM && GET_CODE (XEXP (x, 0)) == PLUS
&& rtx_equal_p (XEXP (XEXP (x, 0), 0), inc_insn.reg_res))
{
rtx b = XEXP (XEXP (x, 0), 1);
mem_insn.mem_loc = address_of_x;
mem_insn.reg0 = inc_insn.reg_res;
mem_insn.reg1 = b;
mem_insn.reg1_is_const = inc_insn.reg1_is_const;
if (CONST_INT_P (b))
{
/* Match with *(reg0 + reg1) where reg1 is a const. */
HOST_WIDE_INT val = INTVAL (b);
if (inc_insn.reg1_is_const
&& (inc_insn.reg1_val == val || inc_insn.reg1_val == -val))
{
mem_insn.reg1_val = val;
return -1;
}
}
else if (!inc_insn.reg1_is_const
&& rtx_equal_p (inc_insn.reg1, b))
/* Match with *(reg0 + reg1). */
return -1;
}
if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
{
/* If REG occurs inside a MEM used in a bit-field reference,
that is unacceptable. */
if (find_address (&XEXP (x, 0)))
return 1;
}
if (x == inc_insn.reg_res)
return 1;
/* Time for some deep diving. */
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
{
tem = find_address (&XEXP (x, i));
/* If this is the first use, let it go so the rest of the
insn can be checked. */
if (value == 0)
value = tem;
else if (tem != 0)
/* More than one match was found. */
return 1;
}
else if (fmt[i] == 'E')
{
int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
{
tem = find_address (&XVECEXP (x, i, j));
/* If this is the first use, let it go so the rest of
the insn can be checked. */
if (value == 0)
value = tem;
else if (tem != 0)
/* More than one match was found. */
return 1;
}
}
}
return value;
}
/* Once a suitable mem reference has been found and the MEM_INSN
structure has been filled in, FIND_INC is called to see if there is
a suitable add or inc insn that follows the mem reference and
determine if it is suitable to merge.
In the case where the MEM_INSN has two registers in the reference,
this function may be called recursively. The first time looking
for an add of the first register, and if that fails, looking for an
add of the second register. The FIRST_TRY parameter is used to
only allow the parameters to be reversed once. */
static bool
find_inc (bool first_try)
{
rtx_insn *insn;
basic_block bb = BLOCK_FOR_INSN (mem_insn.insn);
rtx_insn *other_insn;
df_ref def;
/* Make sure this reg appears only once in this insn. */
if (count_occurrences (PATTERN (mem_insn.insn), mem_insn.reg0, 1) != 1)
{
if (dump_file)
fprintf (dump_file, "mem count failure\n");
return false;
}
if (dump_file)
dump_mem_insn (dump_file);
/* Find the next use that is an inc. */
insn = get_next_ref (REGNO (mem_insn.reg0),
BLOCK_FOR_INSN (mem_insn.insn),
reg_next_inc_use);
if (!insn)
return false;
/* Even though we know the next use is an add or inc because it came
from the reg_next_inc_use, we must still reparse. */
if (!parse_add_or_inc (insn, false))
{
/* Next use was not an add. Look for one extra case. It could be
that we have:
*(a + b)
...= a;
...= b + a
if we reverse the operands in the mem ref we would
find this. Only try it once though. */
if (first_try && !mem_insn.reg1_is_const)
{
reverse_mem ();
return find_inc (false);
}
else
return false;
}
  /* Need to ensure that none of the operands of the inc instruction are
assigned to by the mem insn. */
FOR_EACH_INSN_DEF (def, mem_insn.insn)
{
unsigned int regno = DF_REF_REGNO (def);
if ((regno == REGNO (inc_insn.reg0))
|| (regno == REGNO (inc_insn.reg_res)))
{
if (dump_file)
fprintf (dump_file, "inc conflicts with store failure.\n");
return false;
}
if (!inc_insn.reg1_is_const && (regno == REGNO (inc_insn.reg1)))
{
if (dump_file)
fprintf (dump_file, "inc conflicts with store failure.\n");
return false;
}
}
if (dump_file)
dump_inc_insn (dump_file);
if (inc_insn.form == FORM_POST_ADD)
{
/* Make sure that there is no insn that assigns to inc_insn.res
between the mem_insn and the inc_insn. */
rtx_insn *other_insn = get_next_ref (REGNO (inc_insn.reg_res),
BLOCK_FOR_INSN (mem_insn.insn),
reg_next_def);
if (other_insn != inc_insn.insn)
{
if (dump_file)
fprintf (dump_file,
"result of add is assigned to between mem and inc insns.\n");
return false;
}
other_insn = get_next_ref (REGNO (inc_insn.reg_res),
BLOCK_FOR_INSN (mem_insn.insn),
reg_next_use);
if (other_insn
&& (other_insn != inc_insn.insn)
&& (DF_INSN_LUID (inc_insn.insn) > DF_INSN_LUID (other_insn)))
{
if (dump_file)
fprintf (dump_file,
"result of add is used between mem and inc insns.\n");
return false;
}
/* For the post_add to work, the result_reg of the inc must not be
used in the mem insn since this will become the new index
register. */
if (reg_overlap_mentioned_p (inc_insn.reg_res, PATTERN (mem_insn.insn)))
{
if (dump_file)
fprintf (dump_file, "base reg replacement failure.\n");
return false;
}
}
if (mem_insn.reg1_is_const)
{
if (mem_insn.reg1_val == 0)
{
if (!inc_insn.reg1_is_const)
{
/* The mem looks like *r0 and the rhs of the add has two
registers. */
int luid = DF_INSN_LUID (inc_insn.insn);
if (inc_insn.form == FORM_POST_ADD)
{
/* The trick is that we are not going to increment r0,
we are going to increment the result of the add insn.
For this trick to be correct, the result reg of
the inc must be a valid addressing reg. */
addr_space_t as = MEM_ADDR_SPACE (*mem_insn.mem_loc);
if (GET_MODE (inc_insn.reg_res)
!= targetm.addr_space.address_mode (as))
{
if (dump_file)
fprintf (dump_file, "base reg mode failure.\n");
return false;
}
/* We also need to make sure that the next use of
inc result is after the inc. */
other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_use);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
if (!rtx_equal_p (mem_insn.reg0, inc_insn.reg0))
reverse_inc ();
}
other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
}
}
/* Both the inc/add and the mem have a constant. Need to check
that the constants are ok. */
else if ((mem_insn.reg1_val != inc_insn.reg1_val)
&& (mem_insn.reg1_val != -inc_insn.reg1_val))
return false;
}
else
{
/* The mem insn is of the form *(a + b) where a and b are both
regs. It may be that in order to match the add or inc we
need to treat it as if it was *(b + a). It may also be that
the add is of the form a + c where c does not match b and
then we just abandon this. */
int luid = DF_INSN_LUID (inc_insn.insn);
rtx_insn *other_insn;
/* Make sure this reg appears only once in this insn. */
if (count_occurrences (PATTERN (mem_insn.insn), mem_insn.reg1, 1) != 1)
return false;
if (inc_insn.form == FORM_POST_ADD)
{
/* For this trick to be correct, the result reg of the inc
must be a valid addressing reg. */
addr_space_t as = MEM_ADDR_SPACE (*mem_insn.mem_loc);
if (GET_MODE (inc_insn.reg_res)
!= targetm.addr_space.address_mode (as))
{
if (dump_file)
fprintf (dump_file, "base reg mode failure.\n");
return false;
}
if (rtx_equal_p (mem_insn.reg0, inc_insn.reg0))
{
if (!rtx_equal_p (mem_insn.reg1, inc_insn.reg1))
{
/* See comment above on find_inc (false) call. */
if (first_try)
{
reverse_mem ();
return find_inc (false);
}
else
return false;
}
/* Need to check that there are no assignments to b
before the add insn. */
other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
/* All ok for the next step. */
}
else
{
/* We know that mem_insn.reg0 must equal inc_insn.reg1
or else we would not have found the inc insn. */
reverse_mem ();
if (!rtx_equal_p (mem_insn.reg0, inc_insn.reg0))
{
/* See comment above on find_inc (false) call. */
if (first_try)
return find_inc (false);
else
return false;
}
	      /* To have gotten here, we know that:
*(b + a)
... = (b + a)
We also know that the lhs of the inc is not b or a. We
need to make sure that there are no assignments to b
between the mem ref and the inc. */
other_insn
= get_next_ref (REGNO (inc_insn.reg0), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
}
	      /* Need to check that the next use of the add result is later than
		 the add insn since this will be the reg incremented.  */
other_insn
= get_next_ref (REGNO (inc_insn.reg_res), bb, reg_next_use);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
}
else /* FORM_POST_INC. There is less to check here because we
know that operands must line up. */
{
if (!rtx_equal_p (mem_insn.reg1, inc_insn.reg1))
/* See comment above on find_inc (false) call. */
{
if (first_try)
{
reverse_mem ();
return find_inc (false);
}
else
return false;
}
	    /* To have gotten here, we know that:
*(a + b)
... = (a + b)
We also know that the lhs of the inc is not b. We need to make
sure that there are no assignments to b between the mem ref and
the inc. */
other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
}
}
if (inc_insn.form == FORM_POST_INC)
{
other_insn
= get_next_ref (REGNO (inc_insn.reg0), bb, reg_next_use);
/* When we found inc_insn, we were looking for the
next add or inc, not the next insn that used the
reg. Because we are going to increment the reg
in this form, we need to make sure that there
were no intervening uses of reg. */
if (inc_insn.insn != other_insn)
return false;
}
return try_merge ();
}
/* A recursive function that walks ADDRESS_OF_X to find all of the mem
uses in pat that could be used as an auto inc or dec. It then
calls FIND_INC for each one. */
static bool
find_mem (rtx *address_of_x)
{
rtx x = *address_of_x;
enum rtx_code code = GET_CODE (x);
const char *const fmt = GET_RTX_FORMAT (code);
int i;
if (code == MEM && REG_P (XEXP (x, 0)))
{
/* Match with *reg0. */
mem_insn.mem_loc = address_of_x;
mem_insn.reg0 = XEXP (x, 0);
mem_insn.reg1_is_const = true;
mem_insn.reg1_val = 0;
mem_insn.reg1 = GEN_INT (0);
if (find_inc (true))
return true;
}
if (code == MEM && GET_CODE (XEXP (x, 0)) == PLUS
&& REG_P (XEXP (XEXP (x, 0), 0)))
{
rtx reg1 = XEXP (XEXP (x, 0), 1);
mem_insn.mem_loc = address_of_x;
mem_insn.reg0 = XEXP (XEXP (x, 0), 0);
mem_insn.reg1 = reg1;
if (CONST_INT_P (reg1))
{
mem_insn.reg1_is_const = true;
/* Match with *(reg0 + c) where c is a const. */
mem_insn.reg1_val = INTVAL (reg1);
if (find_inc (true))
return true;
}
else if (REG_P (reg1))
{
/* Match with *(reg0 + reg1). */
mem_insn.reg1_is_const = false;
if (find_inc (true))
return true;
}
}
if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
{
/* If REG occurs inside a MEM used in a bit-field reference,
that is unacceptable. */
return false;
}
/* Time for some deep diving. */
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
{
if (find_mem (&XEXP (x, i)))
return true;
}
else if (fmt[i] == 'E')
{
int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
if (find_mem (&XVECEXP (x, i, j)))
return true;
}
}
return false;
}
/* Try to combine all incs and decs by constant values with memory
references in BB. */
static void
merge_in_block (int max_reg, basic_block bb)
{
rtx_insn *insn;
rtx_insn *curr;
int success_in_block = 0;
if (dump_file)
fprintf (dump_file, "\n\nstarting bb %d\n", bb->index);
FOR_BB_INSNS_REVERSE_SAFE (bb, insn, curr)
{
bool insn_is_add_or_inc = true;
if (!NONDEBUG_INSN_P (insn))
continue;
/* This continue is deliberate. We do not want the uses of the
jump put into reg_next_use because it is not considered safe to
combine a preincrement with a jump. */
if (JUMP_P (insn))
continue;
if (dump_file)
dump_insn_slim (dump_file, insn);
/* Does this instruction increment or decrement a register? */
if (parse_add_or_inc (insn, true))
{
int regno = REGNO (inc_insn.reg_res);
/* Cannot handle case where there are three separate regs
before a mem ref. Too many moves would be needed to be
profitable. */
if ((inc_insn.form == FORM_PRE_INC) || inc_insn.reg1_is_const)
{
mem_insn.insn = get_next_ref (regno, bb, reg_next_use);
if (mem_insn.insn)
{
bool ok = true;
if (!inc_insn.reg1_is_const)
{
/* We are only here if we are going to try a
HAVE_*_MODIFY_REG type transformation. c is a
			 reg and we must make sure that the path from the
inc_insn to the mem_insn.insn is both def and use
clear of c because the inc insn is going to move
into the mem_insn.insn. */
int luid = DF_INSN_LUID (mem_insn.insn);
rtx_insn *other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_use);
if (other_insn && luid > DF_INSN_LUID (other_insn))
ok = false;
other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
ok = false;
}
if (dump_file)
dump_inc_insn (dump_file);
if (ok && find_address (&PATTERN (mem_insn.insn)) == -1)
{
if (dump_file)
dump_mem_insn (dump_file);
if (try_merge ())
{
success_in_block++;
insn_is_add_or_inc = false;
}
}
}
}
}
else
{
insn_is_add_or_inc = false;
mem_insn.insn = insn;
if (find_mem (&PATTERN (insn)))
success_in_block++;
}
/* If the inc insn was merged with a mem, the inc insn is gone
	 and there is nothing to update.  */
if (df_insn_info *insn_info = DF_INSN_INFO_GET (insn))
{
df_ref def, use;
/* Need to update next use. */
FOR_EACH_INSN_INFO_DEF (def, insn_info)
{
reg_next_use[DF_REF_REGNO (def)] = NULL;
reg_next_inc_use[DF_REF_REGNO (def)] = NULL;
reg_next_def[DF_REF_REGNO (def)] = insn;
}
FOR_EACH_INSN_INFO_USE (use, insn_info)
{
reg_next_use[DF_REF_REGNO (use)] = insn;
if (insn_is_add_or_inc)
reg_next_inc_use[DF_REF_REGNO (use)] = insn;
else
reg_next_inc_use[DF_REF_REGNO (use)] = NULL;
}
}
else if (dump_file)
fprintf (dump_file, "skipping update of deleted insn %d\n",
INSN_UID (insn));
}
/* If we were successful, try again. There may have been several
opportunities that were interleaved. This is rare but
gcc.c-torture/compile/pr17273.c actually exhibits this. */
if (success_in_block)
{
      /* In this case, we must clear these vectors since the trick of
	 testing if the stale insn is in the block will not work.  */
memset (reg_next_use, 0, max_reg * sizeof (rtx));
memset (reg_next_inc_use, 0, max_reg * sizeof (rtx));
memset (reg_next_def, 0, max_reg * sizeof (rtx));
df_recompute_luids (bb);
merge_in_block (max_reg, bb);
}
}
#endif
/* Discover auto-inc auto-dec instructions. */
namespace {
const pass_data pass_data_inc_dec =
{
RTL_PASS, /* type */
"auto_inc_dec", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_AUTO_INC_DEC, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_df_finish, /* todo_flags_finish */
};
class pass_inc_dec : public rtl_opt_pass
{
public:
pass_inc_dec (gcc::context *ctxt)
: rtl_opt_pass (pass_data_inc_dec, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *)
{
#ifdef AUTO_INC_DEC
return (optimize > 0 && flag_auto_inc_dec);
#else
return false;
#endif
}
unsigned int execute (function *);
}; // class pass_inc_dec
unsigned int
pass_inc_dec::execute (function *fun ATTRIBUTE_UNUSED)
{
#ifdef AUTO_INC_DEC
basic_block bb;
int max_reg = max_reg_num ();
if (!initialized)
init_decision_table ();
mem_tmp = gen_rtx_MEM (Pmode, NULL_RTX);
df_note_add_problem ();
df_analyze ();
reg_next_use = XCNEWVEC (rtx_insn *, max_reg);
reg_next_inc_use = XCNEWVEC (rtx_insn *, max_reg);
reg_next_def = XCNEWVEC (rtx_insn *, max_reg);
FOR_EACH_BB_FN (bb, fun)
merge_in_block (max_reg, bb);
free (reg_next_use);
free (reg_next_inc_use);
free (reg_next_def);
mem_tmp = NULL;
#endif
return 0;
}
} // anon namespace
rtl_opt_pass *
make_pass_inc_dec (gcc::context *ctxt)
{
return new pass_inc_dec (ctxt);
}
| kito-cheng/gcc | gcc/auto-inc-dec.c | C | gpl-2.0 | 44,162 |
/*
* Copyright (C) 2003-2011 The Music Player Daemon Project
* http://www.musicpd.org
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include "pcm_volume.h"
#include "pcm_utils.h"
#include "audio_format.h"
#include <glib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#undef G_LOG_DOMAIN
#define G_LOG_DOMAIN "pcm_volume"
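/* The integer helpers below scale each sample by a fixed-point factor:
   out = (in * volume + pcm_volume_dither() + PCM_VOLUME_1 / 2) / PCM_VOLUME_1,
   i.e. the product is dithered and rounded before dividing by the
   full-volume constant PCM_VOLUME_1 (from pcm_volume.h). */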
static void
pcm_volume_change_8(int8_t *buffer, const int8_t *end, int volume)
{
while (buffer < end) {
int32_t sample = *buffer;
sample = (sample * volume + pcm_volume_dither() +
PCM_VOLUME_1 / 2)
/ PCM_VOLUME_1;
*buffer++ = pcm_range(sample, 8);
}
}
static void
pcm_volume_change_16(int16_t *buffer, const int16_t *end, int volume)
{
while (buffer < end) {
int32_t sample = *buffer;
sample = (sample * volume + pcm_volume_dither() +
PCM_VOLUME_1 / 2)
/ PCM_VOLUME_1;
*buffer++ = pcm_range(sample, 16);
}
}
#ifdef __i386__
/**
* Optimized volume function for i386. Use the EDX:EAX 2*32 bit
* multiplication result instead of emulating 64 bit multiplication.
*/
static inline int32_t
pcm_volume_sample_24(int32_t sample, int32_t volume, G_GNUC_UNUSED int32_t dither)
{
int32_t result;
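	/* The sal/shr/or sequence below divides the 64-bit product in edx:eax
	   by PCM_VOLUME_1; the shift counts assume PCM_VOLUME_1 == 1 << 10,
	   since 22 + 10 add up to the 32-bit word size. */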
asm(/* edx:eax = sample * volume */
"imul %2\n"
/* "add %3, %1\n" dithering disabled for now, because we
have no overflow check - is dithering really important
here? */
/* eax = edx:eax / PCM_VOLUME_1 */
"sal $22, %%edx\n"
"shr $10, %1\n"
"or %%edx, %1\n"
: "=a"(result)
: "0"(sample), "r"(volume) /* , "r"(dither) */
: "edx"
);
return result;
}
#endif
static void
pcm_volume_change_24(int32_t *buffer, const int32_t *end, int volume)
{
while (buffer < end) {
#ifdef __i386__
/* assembly version for i386 */
int32_t sample = *buffer;
sample = pcm_volume_sample_24(sample, volume,
pcm_volume_dither());
#else
/* portable version */
int64_t sample = *buffer;
sample = (sample * volume + pcm_volume_dither() +
PCM_VOLUME_1 / 2)
/ PCM_VOLUME_1;
#endif
*buffer++ = pcm_range(sample, 24);
}
}
static void
pcm_volume_change_32(int32_t *buffer, const int32_t *end, int volume)
{
while (buffer < end) {
#ifdef __i386__
/* assembly version for i386 */
int32_t sample = *buffer;
*buffer++ = pcm_volume_sample_24(sample, volume, 0);
#else
/* portable version */
int64_t sample = *buffer;
sample = (sample * volume + pcm_volume_dither() +
PCM_VOLUME_1 / 2)
/ PCM_VOLUME_1;
*buffer++ = pcm_range_64(sample, 32);
#endif
}
}
static void
pcm_volume_change_float(float *buffer, const float *end, float volume)
{
while (buffer < end) {
float sample = *buffer;
sample *= volume;
*buffer++ = sample;
}
}
bool
pcm_volume(void *buffer, size_t length,
enum sample_format format,
int volume)
{
if (volume == PCM_VOLUME_1)
return true;
if (volume <= 0) {
memset(buffer, 0, length);
return true;
}
const void *end = pcm_end_pointer(buffer, length);
switch (format) {
case SAMPLE_FORMAT_UNDEFINED:
case SAMPLE_FORMAT_S24:
case SAMPLE_FORMAT_DSD:
case SAMPLE_FORMAT_DSD_LSBFIRST:
/* not implemented */
return false;
case SAMPLE_FORMAT_S8:
pcm_volume_change_8(buffer, end, volume);
return true;
case SAMPLE_FORMAT_S16:
pcm_volume_change_16(buffer, end, volume);
return true;
case SAMPLE_FORMAT_S24_P32:
pcm_volume_change_24(buffer, end, volume);
return true;
case SAMPLE_FORMAT_S32:
pcm_volume_change_32(buffer, end, volume);
return true;
case SAMPLE_FORMAT_FLOAT:
pcm_volume_change_float(buffer, end,
pcm_volume_to_float(volume));
return true;
}
/* unreachable */
assert(false);
return false;
}
| andrewrk/mpd | src/pcm_volume.c | C | gpl-2.0 | 4,334 |
/*
* Copyright (C) 2008 by NXP Semiconductors
* All rights reserved.
*
* @Author: Kevin Wells
* @Descr: LPC3250 SLC NAND controller interface support functions
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include "lpc3250.h"
#include <nand.h>
#include <asm/errno.h>
#include <asm/io.h>
#define NAND_ALE_OFFS 4
#define NAND_CLE_OFFS 8
#define NAND_LARGE_BLOCK_PAGE_SIZE 2048
#define NAND_SMALL_BLOCK_PAGE_SIZE 512
static struct nand_ecclayout lpc32xx_nand_oob_16 = {
.eccbytes = 6,
.eccpos = {10, 11, 12, 13, 14, 15},
.oobfree = {
{.offset = 0,
. length = 4},
{.offset = 6,
. length = 4}
}
};
extern int nand_correct_data(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc);
/*
* DMA Descriptors
* For Large Block: 17 descriptors = ((16 Data and ECC Read) + 1 Spare Area)
* For Small Block: 5 descriptors = ((4 Data and ECC Read) + 1 Spare Area)
*/
static dmac_ll_t dmalist[(CONFIG_SYS_NAND_ECCSIZE/256) * 2 + 1];
static uint32_t ecc_buffer[8]; /* MAX ECC size */
static int dmachan = -1;
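/* XFER_PENDING is nonzero while the SLC DMA FIFO still holds data or a
   transfer count remains outstanding; lpc32xx_nand_xfer only reprograms
   slc_tc when this is clear. */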
#define XFER_PENDING ((SLCNAND->slc_stat & SLCSTAT_DMA_FIFO) | SLCNAND->slc_tc)
static void lpc32xx_nand_init(void)
{
/* Enable clocks to the SLC NAND controller */
CLKPWR->clkpwr_nand_clk_ctrl = (CLKPWR_NANDCLK_SEL_SLC |
CLKPWR_NANDCLK_SLCCLK_EN);
/* Reset SLC NAND controller & clear ECC */
SLCNAND->slc_ctrl = (SLCCTRL_SW_RESET | SLCCTRL_ECC_CLEAR);
/* 8-bit bus, no DMA, CE normal */
SLCNAND->slc_cfg = 0;
/* Interrupts disabled and cleared */
SLCNAND->slc_ien = 0;
SLCNAND->slc_icr = (SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN);
SLCNAND->slc_tac = LPC32XX_SLC_NAND_TIMING;
}
static void lpc32xx_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct nand_chip *this = mtd->priv;
ulong IO_ADDR_W;
if (ctrl & NAND_CTRL_CHANGE) {
IO_ADDR_W = (ulong) this->IO_ADDR_W;
IO_ADDR_W &= ~(NAND_CLE_OFFS | NAND_ALE_OFFS);
if ( ctrl & NAND_CLE ) {
IO_ADDR_W |= NAND_CLE_OFFS;
}
else if ( ctrl & NAND_ALE ) {
IO_ADDR_W |= NAND_ALE_OFFS;
}
if ( ctrl & NAND_NCE ) {
SLCNAND->slc_cfg |= SLCCFG_CE_LOW;
}
else {
SLCNAND->slc_cfg &= ~SLCCFG_CE_LOW;
}
this->IO_ADDR_W = (void *) IO_ADDR_W;
}
if (cmd != NAND_CMD_NONE) {
writel(cmd, this->IO_ADDR_W);
}
}
static int lpc32xx_nand_ready(struct mtd_info *mtd)
{
/* Check the SLC NAND controller status */
return (SLCNAND->slc_stat & SLCSTAT_NAND_READY);
}
static u_char lpc32xx_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
unsigned long *pReg = (unsigned long *) this->IO_ADDR_R;
volatile unsigned long tmp32;
tmp32 = *pReg;
return (u_char) tmp32;
}
/*
* lpc32xx_verify_buf - [DEFAULT] Verify chip data against buffer
* mtd: MTD device structure
* buf: buffer containing the data to compare
* len: number of bytes to compare
*
 * Default verify function for 8bit buswidth
*/
static int lpc32xx_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
unsigned long *pReg = (unsigned long *) this->IO_ADDR_R;
volatile unsigned long tmp32;
for (i=0; i<len; i++) {
tmp32 = *pReg;
if (buf[i] != (u_char) tmp32)
return -EFAULT;
}
return 0;
}
/* Prepares DMA descriptors for NAND RD/WR operations */
/* If the size is < 256 Bytes then it is assumed to be
* an OOB transfer */
static void lpc32xx_nand_dma_configure(struct nand_chip *chip,
const void * buffer, int size, int read)
{
uint32_t i, dmasrc, ctrl, ecc_ctrl, oob_ctrl, dmadst;
void __iomem * base = chip->IO_ADDR_R;
uint32_t *ecc_gen = ecc_buffer;
/*
* CTRL descriptor entry for reading ECC
* Copy Multiple times to sync DMA with Flash Controller
*/
ecc_ctrl = (0x5 |
DMAC_CHAN_SRC_BURST_1 |
DMAC_CHAN_DEST_BURST_1 |
DMAC_CHAN_SRC_WIDTH_32 |
DMAC_CHAN_DEST_WIDTH_32 |
DMAC_CHAN_DEST_AHB1);
/* CTRL descriptor entry for reading/writing Data */
ctrl = 64 | /* 256/4 */
DMAC_CHAN_SRC_BURST_4 |
DMAC_CHAN_DEST_BURST_4 |
DMAC_CHAN_SRC_WIDTH_32 |
DMAC_CHAN_DEST_WIDTH_32 |
DMAC_CHAN_DEST_AHB1;
/* CTRL descriptor entry for reading/writing Spare Area */
oob_ctrl = ((CONFIG_SYS_NAND_OOBSIZE / 4) |
DMAC_CHAN_SRC_BURST_4 |
DMAC_CHAN_DEST_BURST_4 |
DMAC_CHAN_SRC_WIDTH_32 |
DMAC_CHAN_DEST_WIDTH_32 |
DMAC_CHAN_DEST_AHB1);
if (read) {
dmasrc = (uint32_t) (base + offsetof(SLCNAND_REGS_T, slc_dma_data));
dmadst = (uint32_t) (buffer);
ctrl |= DMAC_CHAN_DEST_AUTOINC;
} else {
dmadst = (uint32_t) (base + offsetof(SLCNAND_REGS_T, slc_dma_data));
dmasrc = (uint32_t) (buffer);
ctrl |= DMAC_CHAN_SRC_AUTOINC;
}
/*
* Write Operation Sequence for Small Block NAND
* ----------------------------------------------------------
* 1. X'fer 256 bytes of data from Memory to Flash.
* 2. Copy generated ECC data from Register to Spare Area
* 3. X'fer next 256 bytes of data from Memory to Flash.
* 4. Copy generated ECC data from Register to Spare Area.
 * 5. X'fer 16 bytes of Spare area from Memory to Flash.
* Read Operation Sequence for Small Block NAND
* ----------------------------------------------------------
* 1. X'fer 256 bytes of data from Flash to Memory.
* 2. Copy generated ECC data from Register to ECC calc Buffer.
* 3. X'fer next 256 bytes of data from Flash to Memory.
* 4. Copy generated ECC data from Register to ECC calc Buffer.
* 5. X'fer 16 bytes of Spare area from Flash to Memory.
* Write Operation Sequence for Large Block NAND
* ----------------------------------------------------------
 * 1. Steps(1-4) of Write Operations repeat four times,
* which generates 16 DMA descriptors to X'fer 2048 bytes of
* data & 32 bytes of ECC data.
* 2. X'fer 64 bytes of Spare area from Memory to Flash.
* Read Operation Sequence for Large Block NAND
* ----------------------------------------------------------
 * 1. Steps(1-4) of Read Operations repeat four times,
* which generates 16 DMA descriptors to X'fer 2048 bytes of
* data & 32 bytes of ECC data.
* 2. X'fer 64 bytes of Spare area from Flash to Memory.
*/
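	/*
	 * Illustration: a large block data pass (size == 2048) runs this loop
	 * eight times and builds 16 descriptors (8 data + 8 ECC copies), then
	 * terminates the chain in the "Data only transfer" branch below.  An
	 * OOB pass (size == oobsize < 256) skips the loop and builds only the
	 * single spare-area descriptor.
	 */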
for (i = 0; i < size/256; i++) {
dmalist[i*2].dma_src = (read ?(dmasrc) :(dmasrc + (i*256)));
dmalist[i*2].dma_dest = (read ?(dmadst + (i*256)) :dmadst);
dmalist[i*2].next_lli = (uint32_t) & dmalist[(i*2)+1];
dmalist[i*2].next_ctrl = ctrl;
dmalist[(i*2) + 1].dma_src = (uint32_t)
(base + offsetof(SLCNAND_REGS_T, slc_ecc));
dmalist[(i*2) + 1].dma_dest = (uint32_t) & ecc_gen[i];
dmalist[(i*2) + 1].next_lli = (uint32_t) & dmalist[(i*2)+2];
dmalist[(i*2) + 1].next_ctrl = ecc_ctrl;
}
if (i) { /* Data only transfer */
dmalist[(i*2) - 1].next_lli = 0;
dmalist[(i*2) - 1].next_ctrl |= DMAC_CHAN_INT_TC_EN;
return ;
}
/* OOB only transfer */
if (read) {
dmasrc = (uint32_t) (base + offsetof(SLCNAND_REGS_T, slc_dma_data));
dmadst = (uint32_t) (buffer);
oob_ctrl |= DMAC_CHAN_DEST_AUTOINC;
} else {
dmadst = (uint32_t) (base + offsetof(SLCNAND_REGS_T, slc_dma_data));
dmasrc = (uint32_t) (buffer);
oob_ctrl |= DMAC_CHAN_SRC_AUTOINC;
}
/* Read/ Write Spare Area Data To/From Flash */
dmalist[i*2].dma_src = dmasrc;
dmalist[i*2].dma_dest = dmadst;
dmalist[i*2].next_lli = 0;
dmalist[i*2].next_ctrl = (oob_ctrl | DMAC_CHAN_INT_TC_EN);
}
static void lpc32xx_nand_xfer(struct mtd_info *mtd, const u_char *buf, int len, int read)
{
struct nand_chip *chip = mtd->priv;
uint32_t config;
/* DMA Channel Configuration */
config = (read ? DMAC_CHAN_FLOW_D_P2M : DMAC_CHAN_FLOW_D_M2P) |
(read ? DMAC_DEST_PERIP(0) : DMAC_DEST_PERIP(DMA_PERID_NAND1)) |
(read ? DMAC_SRC_PERIP(DMA_PERID_NAND1) : DMAC_SRC_PERIP(0)) |
DMAC_CHAN_ENABLE;
/* Prepare DMA descriptors */
lpc32xx_nand_dma_configure(chip, buf, len, read);
/* Setup SLC controller and start transfer */
if (read)
SLCNAND->slc_cfg |= SLCCFG_DMA_DIR;
else /* NAND_ECC_WRITE */
SLCNAND->slc_cfg &= ~SLCCFG_DMA_DIR;
SLCNAND->slc_cfg |= SLCCFG_DMA_BURST;
/* Write length for new transfers */
if (!XFER_PENDING)
SLCNAND->slc_tc = len +
(len != mtd->oobsize ? mtd->oobsize : 0);
SLCNAND->slc_ctrl |= SLCCTRL_DMA_START;
/* Start DMA transfers */
lpc32xx_dma_start_xfer(dmachan, dmalist, config);
/* Wait for NAND to be ready */
while(!lpc32xx_nand_ready(mtd));
/* Wait till DMA transfer is DONE */
if (lpc32xx_dma_wait_status(dmachan)) {
printk(KERN_ERR "NAND DMA transfer error!\r\n");
}
/* Stop DMA & HW ECC */
SLCNAND->slc_ctrl &= ~SLCCTRL_DMA_START;
SLCNAND->slc_cfg &= ~(SLCCFG_DMA_DIR | SLCCFG_DMA_BURST |
SLCCFG_ECC_EN | SLCCFG_DMA_ECC);
}
static uint32_t slc_ecc_copy_to_buffer(uint8_t * spare,
const uint32_t * ecc, int count)
{
int i;
for (i = 0; i < (count * 3); i += 3) {
uint32_t ce = ecc[i/3];
ce = ~(ce << 2) & 0xFFFFFF;
spare[i+2] = (uint8_t)(ce & 0xFF); ce >>= 8;
spare[i+1] = (uint8_t)(ce & 0xFF); ce >>= 8;
spare[i] = (uint8_t)(ce & 0xFF);
}
return 0;
}
static int lpc32xx_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code)
{
return slc_ecc_copy_to_buffer(ecc_code, ecc_buffer,
CONFIG_SYS_NAND_ECCSIZE == NAND_LARGE_BLOCK_PAGE_SIZE ? 8 : 2);
}
/*
* Enables and prepares SLC NAND controller
* for doing data transfers with H/W ECC enabled.
*/
static void lpc32xx_hwecc_enable(struct mtd_info *mtd, int mode)
{
/* Clear ECC */
SLCNAND->slc_ctrl = SLCCTRL_ECC_CLEAR;
/* Setup SLC controller for H/W ECC operations */
SLCNAND->slc_cfg |= (SLCCFG_ECC_EN | SLCCFG_DMA_ECC);
}
/*
* lpc32xx_write_buf - [DEFAULT] write buffer to chip
* mtd: MTD device structure
* buf: data buffer
* len: number of bytes to write
*
 * Default write function for 8bit buswidth
*/
static void lpc32xx_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
lpc32xx_nand_xfer(mtd, buf, len, 0);
}
/*
* lpc32xx_read_buf - [DEFAULT] read chip data into buffer
* mtd: MTD device structure
* buf: buffer to store date
* len: number of bytes to read
*
 * Default read function for an 8-bit bus width
*/
static void lpc32xx_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
lpc32xx_nand_xfer(mtd, buf, len, 1);
}
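/*
 * Board hook called by the generic NAND layer: bring up the SLC controller,
 * claim a DMA channel and wire the hardware-ECC callbacks and buffer
 * accessors into the nand_chip structure.
 */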
int board_nand_init(struct nand_chip *nand)
{
/* Initial NAND interface */
lpc32xx_nand_init();
/* Acquire a channel for our use */
dmachan = lpc32xx_dma_get_channel();
if (unlikely(dmachan < 0)){
printk(KERN_INFO "Unable to get a free DMA "
"channel for NAND transfers\r\n");
return -1;
}
/* ECC mode and size */
nand->ecc.mode = NAND_ECC_HW;
nand->ecc.bytes = CONFIG_SYS_NAND_ECCBYTES;
nand->ecc.size = CONFIG_SYS_NAND_ECCSIZE;
if(CONFIG_SYS_NAND_ECCSIZE != NAND_LARGE_BLOCK_PAGE_SIZE)
nand->ecc.layout = &lpc32xx_nand_oob_16;
nand->ecc.calculate = lpc32xx_ecc_calculate;
nand->ecc.correct = nand_correct_data;
nand->ecc.hwctl = lpc32xx_hwecc_enable;
nand->cmd_ctrl = lpc32xx_nand_hwcontrol;
nand->dev_ready = lpc32xx_nand_ready;
nand->chip_delay = 2000;
nand->read_buf = lpc32xx_read_buf;
nand->write_buf = lpc32xx_write_buf;
nand->read_byte = lpc32xx_read_byte;
nand->verify_buf = lpc32xx_verify_buf;
return 0;
}
| diverger/uboot-lpc32xx | drivers/mtd/nand/lpc32xx_nand.c | C | gpl-2.0 | 11,991 |
/*
Copyright (C) 1997-2001 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "g_local.h"
#ifdef IML_Q2_EXTENSIONS
#include <stdio.h>
#include "binmsg.h"
#endif // IML_Q2_EXTENSIONS
void InitTrigger (edict_t *self)
{
if (!VectorCompare (self->s.angles, vec3_origin))
G_SetMovedir (self->s.angles, self->movedir);
self->solid = SOLID_TRIGGER;
self->movetype = MOVETYPE_NONE;
gi.setmodel (self, self->model);
self->svflags = SVF_NOCLIENT;
}
// the wait time has passed, so set back up for another activation
void multi_wait (edict_t *ent)
{
ent->nextthink = 0;
}
// the trigger was just activated
// ent->activator should be set to the activator so it can be held through a delay
// so wait for the delay time before firing
void multi_trigger (edict_t *ent)
{
if (ent->nextthink)
return; // already been triggered
G_UseTargets (ent, ent->activator);
if (ent->wait > 0)
{
ent->think = multi_wait;
ent->nextthink = level.time + ent->wait;
}
else
{ // we can't just remove (self) here, because this is a touch function
// called while looping through area links...
ent->touch = NULL;
ent->nextthink = level.time + FRAMETIME;
ent->think = G_FreeEdict;
}
}
void Use_Multi (edict_t *ent, edict_t *other, edict_t *activator)
{
ent->activator = activator;
multi_trigger (ent);
}
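// Touch handler: players are ignored when NOT_PLAYER is set, monsters only
// count when MONSTER is set, and the toucher must roughly face the trigger's
// movedir (if one was given) before it fires.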
void Touch_Multi (edict_t *self, edict_t *other, cplane_t *plane, csurface_t *surf)
{
if(other->client)
{
if (self->spawnflags & 2)
return;
}
else if (other->svflags & SVF_MONSTER)
{
if (!(self->spawnflags & 1))
return;
}
else
return;
if (!VectorCompare(self->movedir, vec3_origin))
{
vec3_t forward;
AngleVectors(other->s.angles, forward, NULL, NULL);
if (_DotProduct(forward, self->movedir) < 0)
return;
}
self->activator = other;
multi_trigger (self);
}
/*QUAKED trigger_multiple (.5 .5 .5) ? MONSTER NOT_PLAYER TRIGGERED
Variable sized repeatable trigger. Must be targeted at one or more entities.
If "delay" is set, the trigger waits some time after activating before firing.
"wait" : Seconds between triggerings. (.2 default)
sounds
1) secret
2) beep beep
3) large switch
4)
set "message" to text string
*/
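// Illustrative map entity (hypothetical target name), firing "door1" while
// touched, at most once every 2 seconds:
//   { "classname" "trigger_multiple" "target" "door1" "wait" "2" }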
void trigger_enable (edict_t *self, edict_t *other, edict_t *activator)
{
self->solid = SOLID_TRIGGER;
self->use = Use_Multi;
gi.linkentity (self);
}
void SP_trigger_multiple (edict_t *ent)
{
if (ent->sounds == 1)
ent->noise_index = gi.soundindex ("misc/secret.wav");
else if (ent->sounds == 2)
ent->noise_index = gi.soundindex ("misc/talk.wav");
else if (ent->sounds == 3)
ent->noise_index = gi.soundindex ("misc/trigger1.wav");
if (!ent->wait)
ent->wait = 0.2;
ent->touch = Touch_Multi;
ent->movetype = MOVETYPE_NONE;
ent->svflags |= SVF_NOCLIENT;
if (ent->spawnflags & 4)
{
ent->solid = SOLID_NOT;
ent->use = trigger_enable;
}
else
{
ent->solid = SOLID_TRIGGER;
ent->use = Use_Multi;
}
if (!VectorCompare(ent->s.angles, vec3_origin))
G_SetMovedir (ent->s.angles, ent->movedir);
gi.setmodel (ent, ent->model);
gi.linkentity (ent);
}
/*QUAKED trigger_once (.5 .5 .5) ? x x TRIGGERED
Triggers once, then removes itself.
You must set the key "target" to the name of another object in the level that has a matching "targetname".
If TRIGGERED, this trigger must be triggered before it is live.
sounds
1) secret
2) beep beep
3) large switch
4)
"message" string to be displayed when triggered
*/
void SP_trigger_once(edict_t *ent)
{
// make old maps work because I messed up on flag assignments here
// triggered was on bit 1 when it should have been on bit 4
if (ent->spawnflags & 1)
{
vec3_t v;
VectorMA (ent->mins, 0.5, ent->size, v);
ent->spawnflags &= ~1;
ent->spawnflags |= 4;
gi.dprintf("fixed TRIGGERED flag on %s at %s\n", ent->classname, vtos(v));
}
ent->wait = -1;
SP_trigger_multiple (ent);
}
/*QUAKED trigger_relay (.5 .5 .5) (-8 -8 -8) (8 8 8)
This fixed size trigger cannot be touched, it can only be fired by other events.
*/
void trigger_relay_use (edict_t *self, edict_t *other, edict_t *activator)
{
G_UseTargets (self, activator);
}
void SP_trigger_relay (edict_t *self)
{
self->use = trigger_relay_use;
}
/*
==============================================================================
trigger_key
==============================================================================
*/
/*QUAKED trigger_key (.5 .5 .5) (-8 -8 -8) (8 8 8)
A relay trigger that only fires its targets if the player has the proper key.
Use "item" to specify the required key, for example "key_data_cd"
*/
void trigger_key_use (edict_t *self, edict_t *other, edict_t *activator)
{
int index;
if (!self->item)
return;
if (!activator->client)
return;
index = ITEM_INDEX(self->item);
if (!activator->client->pers.inventory[index])
{
if (level.time < self->touch_debounce_time)
return;
self->touch_debounce_time = level.time + 5.0;
gi.centerprintf (activator, "You need the %s", self->item->pickup_name);
gi.sound (activator, CHAN_AUTO, gi.soundindex ("misc/keytry.wav"), 1, ATTN_NORM, 0);
return;
}
gi.sound (activator, CHAN_AUTO, gi.soundindex ("misc/keyuse.wav"), 1, ATTN_NORM, 0);
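	// In coop the key is consumed for every player: power cubes are removed
	// one matching cube bit at a time, other keys are simply cleared from
	// everyone's inventory.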
if (coop->value)
{
int player;
edict_t *ent;
if (strcmp(self->item->classname, "key_power_cube") == 0)
{
int cube;
for (cube = 0; cube < 8; cube++)
if (activator->client->pers.power_cubes & (1 << cube))
break;
for (player = 1; player <= game.maxclients; player++)
{
ent = &g_edicts[player];
if (!ent->inuse)
continue;
if (!ent->client)
continue;
if (ent->client->pers.power_cubes & (1 << cube))
{
ent->client->pers.inventory[index]--;
ent->client->pers.power_cubes &= ~(1 << cube);
}
}
}
else
{
for (player = 1; player <= game.maxclients; player++)
{
ent = &g_edicts[player];
if (!ent->inuse)
continue;
if (!ent->client)
continue;
ent->client->pers.inventory[index] = 0;
}
}
}
else
{
activator->client->pers.inventory[index]--;
}
G_UseTargets (self, activator);
self->use = NULL;
}
void SP_trigger_key (edict_t *self)
{
if (!st.item)
{
gi.dprintf("no key item for trigger_key at %s\n", vtos(self->s.origin));
return;
}
self->item = FindItemByClassname (st.item);
if (!self->item)
{
gi.dprintf("item %s not found for trigger_key at %s\n", st.item, vtos(self->s.origin));
return;
}
if (!self->target)
{
gi.dprintf("%s at %s has no target\n", self->classname, vtos(self->s.origin));
return;
}
gi.soundindex ("misc/keytry.wav");
gi.soundindex ("misc/keyuse.wav");
self->use = trigger_key_use;
}
/*
==============================================================================
trigger_counter
==============================================================================
*/
/*QUAKED trigger_counter (.5 .5 .5) ? nomessage
Acts as an intermediary for an action that takes multiple inputs.
If nomessage is not set, it will print "1 more.. " etc. when triggered and "sequence complete" when finished.
After the counter has been triggered "count" times (default 2), it will fire all of its targets and remove itself.
*/
void trigger_counter_use(edict_t *self, edict_t *other, edict_t *activator)
{
if (self->count == 0)
return;
self->count--;
if (self->count)
{
if (! (self->spawnflags & 1))
{
gi.centerprintf(activator, "%i more to go...", self->count);
gi.sound (activator, CHAN_AUTO, gi.soundindex ("misc/talk1.wav"), 1, ATTN_NORM, 0);
}
return;
}
if (! (self->spawnflags & 1))
{
gi.centerprintf(activator, "Sequence completed!");
gi.sound (activator, CHAN_AUTO, gi.soundindex ("misc/talk1.wav"), 1, ATTN_NORM, 0);
}
self->activator = activator;
multi_trigger (self);
}
void SP_trigger_counter (edict_t *self)
{
self->wait = -1;
if (!self->count)
self->count = 2;
self->use = trigger_counter_use;
}
/*
==============================================================================
trigger_always
==============================================================================
*/
/*QUAKED trigger_always (.5 .5 .5) (-8 -8 -8) (8 8 8)
This trigger will always fire. It is activated by the world.
*/
void SP_trigger_always (edict_t *ent)
{
// we must have some delay to make sure our use targets are present
if (ent->delay < 0.2)
ent->delay = 0.2;
G_UseTargets(ent, ent);
}
/*
==============================================================================
trigger_push
==============================================================================
*/
#define PUSH_ONCE 1
static int windsound;
void trigger_push_touch (edict_t *self, edict_t *other, cplane_t *plane, csurface_t *surf)
{
if (strcmp(other->classname, "grenade") == 0)
{
VectorScale (self->movedir, self->speed * 10, other->velocity);
}
else if (other->health > 0)
{
VectorScale (self->movedir, self->speed * 10, other->velocity);
if (other->client)
{
// don't take falling damage immediately from this
VectorCopy (other->velocity, other->client->oldvelocity);
if (other->fly_sound_debounce_time < level.time)
{
other->fly_sound_debounce_time = level.time + 1.5;
gi.sound (other, CHAN_AUTO, windsound, 1, ATTN_NORM, 0);
}
}
}
if (self->spawnflags & PUSH_ONCE)
G_FreeEdict (self);
}
/*QUAKED trigger_push (.5 .5 .5) ? PUSH_ONCE
Pushes the player
"speed" defaults to 1000
*/
void SP_trigger_push (edict_t *self)
{
InitTrigger (self);
windsound = gi.soundindex ("misc/windfly.wav");
self->touch = trigger_push_touch;
if (!self->speed)
self->speed = 1000;
gi.linkentity (self);
}
/*
==============================================================================
trigger_hurt
==============================================================================
*/
/*QUAKED trigger_hurt (.5 .5 .5) ? START_OFF TOGGLE SILENT NO_PROTECTION SLOW
Any entity that touches this will be hurt.
It does dmg points of damage each server frame
SILENT suppresses playing the sound
SLOW changes the damage rate to once per second
NO_PROTECTION *nothing* stops the damage
"dmg" default 5 (whole numbers only)
*/
void hurt_use (edict_t *self, edict_t *other, edict_t *activator)
{
if (self->solid == SOLID_NOT)
self->solid = SOLID_TRIGGER;
else
self->solid = SOLID_NOT;
gi.linkentity (self);
if (!(self->spawnflags & 2))
self->use = NULL;
}
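// spawnflags: 1 START_OFF, 2 TOGGLE, 4 SILENT, 8 NO_PROTECTION, 16 SLOW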
void hurt_touch (edict_t *self, edict_t *other, cplane_t *plane, csurface_t *surf)
{
int dflags;
if (!other->takedamage)
return;
if (self->timestamp > level.time)
return;
if (self->spawnflags & 16)
self->timestamp = level.time + 1;
else
self->timestamp = level.time + FRAMETIME;
if (!(self->spawnflags & 4))
{
if ((level.framenum % 10) == 0)
gi.sound (other, CHAN_AUTO, self->noise_index, 1, ATTN_NORM, 0);
}
if (self->spawnflags & 8)
dflags = DAMAGE_NO_PROTECTION;
else
dflags = 0;
T_Damage (other, self, self, vec3_origin, other->s.origin, vec3_origin, self->dmg, self->dmg, dflags, MOD_TRIGGER_HURT);
}
void SP_trigger_hurt (edict_t *self)
{
InitTrigger (self);
self->noise_index = gi.soundindex ("world/electro.wav");
self->touch = hurt_touch;
if (!self->dmg)
self->dmg = 5;
if (self->spawnflags & 1)
self->solid = SOLID_NOT;
else
self->solid = SOLID_TRIGGER;
if (self->spawnflags & 2)
self->use = hurt_use;
gi.linkentity (self);
}
/*
==============================================================================
trigger_gravity
==============================================================================
*/
/*QUAKED trigger_gravity (.5 .5 .5) ?
Changes the touching entity's gravity to
the value of "gravity". 1.0 is standard
gravity for the level.
*/
void trigger_gravity_touch (edict_t *self, edict_t *other, cplane_t *plane, csurface_t *surf)
{
other->gravity = self->gravity;
}
void SP_trigger_gravity (edict_t *self)
{
if (st.gravity == 0)
{
gi.dprintf("trigger_gravity without gravity set at %s\n", vtos(self->s.origin));
G_FreeEdict (self);
return;
}
InitTrigger (self);
self->gravity = atoi(st.gravity);
self->touch = trigger_gravity_touch;
}
/*
==============================================================================
trigger_monsterjump
==============================================================================
*/
/*QUAKED trigger_monsterjump (.5 .5 .5) ?
Walking monsters that touch this will jump in the direction of the trigger's angle
"speed" default to 200, the speed thrown forward
"height" default to 200, the speed thrown upwards
*/
void trigger_monsterjump_touch (edict_t *self, edict_t *other, cplane_t *plane, csurface_t *surf)
{
if (other->flags & (FL_FLY | FL_SWIM) )
return;
if (other->svflags & SVF_DEADMONSTER)
return;
if ( !(other->svflags & SVF_MONSTER))
return;
// set XY even if not on ground, so the jump will clear lips
other->velocity[0] = self->movedir[0] * self->speed;
other->velocity[1] = self->movedir[1] * self->speed;
if (!other->groundentity)
return;
other->groundentity = NULL;
other->velocity[2] = self->movedir[2];
}
void SP_trigger_monsterjump (edict_t *self)
{
if (!self->speed)
self->speed = 200;
if (!st.height)
st.height = 200;
if (self->s.angles[YAW] == 0)
self->s.angles[YAW] = 360;
InitTrigger (self);
self->touch = trigger_monsterjump_touch;
self->movedir[2] = st.height;
}
/*
==============================================================================
trigger_region
==============================================================================
*/
#ifdef IML_Q2_EXTENSIONS
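// Remember that this player is currently inside the given region trigger,
// using the first free slot (or doing nothing if it is already recorded).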
static void AddRegion(edict_t *player, edict_t *region)
{
int i;
for (i = 0; i < MAX_REGIONS; i++)
{
if (player->client->in_regions[i] == region)
{
break;
}
else if (player->client->in_regions[i] == NULL)
{
player->client->in_regions[i] = region;
break;
}
}
}
void trigger_region_touch (edict_t *self, edict_t *other, cplane_t *plane,
csurface_t *surf)
{
if (other->client)
AddRegion(other, self);
}
void SP_trigger_region (edict_t *self)
{
InitTrigger (self);
self->touch = &trigger_region_touch;
}
static qboolean EntityInList (edict_t *ent, edict_t **list, size_t items)
{
int i;
for (i = 0; i < items; i++)
if (list[i] == ent)
return true;
return false;
}
static void SendBinMsg_InRegion(edict_t *player, char *region_name,
qboolean is_in_region)
{
char key[MAX_STRING_CHARS];
binmsg_byte buffer[BINMSG_MAX_SIZE];
binmsg_message msg;
// Format our key name.
_snprintf(key, sizeof(key), "in-region?/%s", region_name);
key[MAX_STRING_CHARS-1] = '\0';
// Build and send the message.
if (!binmsg_build(&msg, buffer, BINMSG_MAX_SIZE, "state"))
return;
if (!binmsg_add_string(&msg.args, key))
return;
if (!binmsg_add_bool(&msg.args, is_in_region))
return;
if (!binmsg_build_done(&msg))
return;
SendBinMsg(player, msg.buffer, msg.buffer_size);
}
static void ExitRegion (edict_t *player, edict_t *region)
{
if (region->region_name)
SendBinMsg_InRegion(player, region->region_name, false);
if (region->exit_target)
G_UseTargetsByName(region, region->exit_target, player);
}
static void EnterRegion (edict_t *player, edict_t *region)
{
if (region->region_name)
SendBinMsg_InRegion(player, region->region_name, true);
if (region->enter_target)
G_UseTargetsByName(region, region->enter_target, player);
}
void CheckRegions (edict_t *player)
{
int i;
// Send exit messages.
for (i = 0; i < MAX_REGIONS; i++)
if (!EntityInList(player->client->in_regions_old[i],
player->client->in_regions, MAX_REGIONS))
ExitRegion(player, player->client->in_regions_old[i]);
// Send enter messages.
for (i = 0; i < MAX_REGIONS; i++)
if (!EntityInList(player->client->in_regions[i],
player->client->in_regions_old, MAX_REGIONS))
EnterRegion(player, player->client->in_regions[i]);
// Move in_regions to in_regions_old and clear in_regions.
memcpy(player->client->in_regions_old,
player->client->in_regions, sizeof(edict_t*) * MAX_REGIONS);
memset(player->client->in_regions, 0, sizeof(edict_t*) * MAX_REGIONS);
}
#endif // IML_Q2_EXTENSIONS
| lambda/wxQuake2 | game/g_trigger.c | C | gpl-2.0 | 17,071 |
#include "capwap.h"
#include "capwap_element.h"
/********************************************************************
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Radio ID | MAC Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| MAC Address | QoS Sub-Element... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
0 1
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved|8021p|RSV| DSCP Tag |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Type: 1043 for IEEE 802.11 Update Station QoS
Length: 14
********************************************************************/
/* */
static void capwap_80211_updatestationqos_element_create(void* data, capwap_message_elements_handle handle, struct capwap_write_message_elements_ops* func) {
int i;
struct capwap_80211_updatestationqos_element* element = (struct capwap_80211_updatestationqos_element*)data;
ASSERT(data != NULL);
func->write_u8(handle, element->radioid);
func->write_block(handle, element->address, MACADDRESS_EUI48_LENGTH);
for (i = 0; i < CAPWAP_UPDATE_STATION_QOS_SUBELEMENTS; i++) {
func->write_u8(handle, element->qos[i].priority8021p & CAPWAP_UPDATE_STATION_QOS_PRIORIY_MASK);
func->write_u8(handle, element->qos[i].dscp & CAPWAP_UPDATE_STATION_QOS_DSCP_MASK);
}
}
/* */
static void* capwap_80211_updatestationqos_element_parsing(capwap_message_elements_handle handle, struct capwap_read_message_elements_ops* func) {
int i;
struct capwap_80211_updatestationqos_element* data;
ASSERT(handle != NULL);
ASSERT(func != NULL);
if (func->read_ready(handle) != 14) {
capwap_logging_debug("Invalid IEEE 802.11 Update Station QoS element");
return NULL;
}
/* */
data = (struct capwap_80211_updatestationqos_element*)capwap_alloc(sizeof(struct capwap_80211_updatestationqos_element));
memset(data, 0, sizeof(struct capwap_80211_updatestationqos_element));
/* Retrieve data */
func->read_u8(handle, &data->radioid);
func->read_block(handle, data->address, MACADDRESS_EUI48_LENGTH);
for (i = 0; i < CAPWAP_UPDATE_STATION_QOS_SUBELEMENTS; i++) {
func->read_u8(handle, &data->qos[i].priority8021p);
data->qos[i].priority8021p &= CAPWAP_UPDATE_STATION_QOS_PRIORIY_MASK;
func->read_u8(handle, &data->qos[i].dscp);
data->qos[i].dscp &= CAPWAP_UPDATE_STATION_QOS_DSCP_MASK;
}
return data;
}
/* */
static void* capwap_80211_updatestationqos_element_clone(void* data) {
ASSERT(data != NULL);
return capwap_clone(data, sizeof(struct capwap_80211_updatestationqos_element));
}
/* */
static void capwap_80211_updatestationqos_element_free(void* data) {
ASSERT(data != NULL);
capwap_free(data);
}
/* */
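/* Operations table for the IEEE 802.11 Update Station QoS message element (type 1043) */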
struct capwap_message_elements_ops capwap_element_80211_updatestationqos_ops = {
.create_message_element = capwap_80211_updatestationqos_element_create,
.parsing_message_element = capwap_80211_updatestationqos_element_parsing,
.clone_message_element = capwap_80211_updatestationqos_element_clone,
.free_message_element = capwap_80211_updatestationqos_element_free
};
| alagoutte/smartcapwap | src/common/capwap_element_80211_updatestationqos.c | C | gpl-2.0 | 3,276 |
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* null audio source
*/
#include <inttypes.h>
#include <stdio.h>
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "avfilter.h"
#include "internal.h"
static int request_frame(AVFilterLink *link)
{
return AVERROR_EOF;
}
static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.request_frame = request_frame,
},
{ NULL }
};
AVFilter avfilter_asrc_anullsrc = {
.name = "anullsrc",
.description = NULL_IF_CONFIG_SMALL("Null audio source, never return audio frames."),
.inputs = NULL,
.outputs = avfilter_asrc_anullsrc_outputs,
};
| DDTChen/CookieVLC | vlc/contrib/android/ffmpeg/libavfilter/asrc_anullsrc.c | C | gpl-2.0 | 1,481 |
/*
* Copyright (c) 1990,1993 Regents of The University of Michigan.
* All Rights Reserved. See COPYRIGHT.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <errno.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <atalk/logger.h>
#include <atalk/adouble.h>
#include <atalk/compat.h>
#include <atalk/dsi.h>
#include <atalk/afp.h>
#include <atalk/paths.h>
#include <atalk/util.h>
#include <atalk/server_child.h>
#include <atalk/server_ipc.h>
#include <atalk/errchk.h>
#include <atalk/globals.h>
#include <atalk/netatalk_conf.h>
#include "afp_config.h"
#include "status.h"
#include "fork.h"
#include "uam_auth.h"
#include "afp_zeroconf.h"
#define AFP_LISTENERS 32
#define FDSET_SAFETY 5
unsigned char nologin = 0;
static AFPObj obj;
static server_child *server_children;
static sig_atomic_t reloadconfig = 0;
static sig_atomic_t gotsigchld = 0;
/* Two pointers to dynamically allocated arrays which store pollfds and associated data */
static struct pollfd *fdset;
static struct polldata *polldata;
static int fdset_size; /* current allocated size */
static int fdset_used; /* number of used elements */
static int disasociated_ipc_fd; /* disassociated sessions use this fd for IPC */
static afp_child_t *dsi_start(AFPObj *obj, DSI *dsi, server_child *server_children);
static void afp_exit(int ret)
{
exit(ret);
}
/* ------------------
initialize fd set we are waiting for.
*/
static void fd_set_listening_sockets(const AFPObj *config)
{
DSI *dsi;
for (dsi = config->dsi; dsi; dsi = dsi->next) {
fdset_add_fd(config->options.connections + AFP_LISTENERS + FDSET_SAFETY,
&fdset,
&polldata,
&fdset_used,
&fdset_size,
dsi->serversock,
LISTEN_FD,
dsi);
}
if (config->options.flags & OPTION_KEEPSESSIONS)
fdset_add_fd(config->options.connections + AFP_LISTENERS + FDSET_SAFETY,
&fdset,
&polldata,
&fdset_used,
&fdset_size,
disasociated_ipc_fd,
DISASOCIATED_IPC_FD,
NULL);
}
static void fd_reset_listening_sockets(const AFPObj *config)
{
const DSI *dsi;
for (dsi = config->dsi; dsi; dsi = dsi->next) {
fdset_del_fd(&fdset, &polldata, &fdset_used, &fdset_size, dsi->serversock);
}
if (config->options.flags & OPTION_KEEPSESSIONS)
fdset_del_fd(&fdset, &polldata, &fdset_used, &fdset_size, disasociated_ipc_fd);
}
/* ------------------ */
static void afp_goaway(int sig)
{
switch( sig ) {
case SIGTERM:
case SIGQUIT:
switch (sig) {
case SIGTERM:
LOG(log_note, logtype_afpd, "AFP Server shutting down on SIGTERM");
break;
case SIGQUIT:
if (obj.options.flags & OPTION_KEEPSESSIONS) {
LOG(log_note, logtype_afpd, "AFP Server shutting down on SIGQUIT, NOT disconnecting clients");
} else {
LOG(log_note, logtype_afpd, "AFP Server shutting down on SIGQUIT");
sig = SIGTERM;
}
break;
}
if (server_children)
server_child_kill(server_children, CHILD_DSIFORK, sig);
_exit(0);
break;
case SIGUSR1 :
nologin++;
auth_unload();
LOG(log_info, logtype_afpd, "disallowing logins");
if (server_children)
server_child_kill(server_children, CHILD_DSIFORK, sig);
break;
case SIGHUP :
/* w/ a configuration file, we can force a re-read if we want */
reloadconfig = 1;
break;
case SIGCHLD:
/* w/ a configuration file, we can force a re-read if we want */
gotsigchld = 1;
break;
default :
LOG(log_error, logtype_afpd, "afp_goaway: bad signal" );
}
return;
}
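/* Reap exited session children: remove each one from the bookkeeping table,
   drop its IPC fd from the poll set and log how it terminated. */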
static void child_handler(void)
{
int fd;
int status, i;
pid_t pid;
#ifndef WAIT_ANY
#define WAIT_ANY (-1)
#endif /* ! WAIT_ANY */
while ((pid = waitpid(WAIT_ANY, &status, WNOHANG)) > 0) {
for (i = 0; i < server_children->nforks; i++) {
if ((fd = server_child_remove(server_children, i, pid)) != -1) {
fdset_del_fd(&fdset, &polldata, &fdset_used, &fdset_size, fd);
break;
}
}
if (WIFEXITED(status)) {
if (WEXITSTATUS(status))
LOG(log_info, logtype_afpd, "child[%d]: exited %d", pid, WEXITSTATUS(status));
else
LOG(log_info, logtype_afpd, "child[%d]: done", pid);
} else {
if (WIFSIGNALED(status))
LOG(log_info, logtype_afpd, "child[%d]: killed by signal %d", pid, WTERMSIG(status));
else
LOG(log_info, logtype_afpd, "child[%d]: died", pid);
}
}
}
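/* Best effort: raise RLIMIT_NOFILE to at least 65535 so enough descriptors
   are available for client sessions; failure is only logged. */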
static int setlimits(void)
{
struct rlimit rlim;
if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
LOG(log_warning, logtype_afpd, "setlimits: reading current limits failed: %s", strerror(errno));
return -1;
}
if (rlim.rlim_cur != RLIM_INFINITY && rlim.rlim_cur < 65535) {
rlim.rlim_cur = 65535;
if (rlim.rlim_max != RLIM_INFINITY && rlim.rlim_max < 65535)
rlim.rlim_max = 65535;
if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) {
LOG(log_warning, logtype_afpd, "setlimits: increasing limits failed: %s", strerror(errno));
return -1;
}
}
return 0;
}
int main(int ac, char **av)
{
fd_set rfds;
void *ipc;
struct sigaction sv;
sigset_t sigs;
int ret;
/* Parse argv args and initialize default options */
afp_options_parse_cmdline(&obj, ac, av);
if (!(obj.cmdlineflags & OPTION_DEBUG) && (daemonize(0, 0) != 0))
exit(EXITERR_SYS);
/* Log SIGBUS/SIGSEGV SBT */
fault_setup(NULL);
if (afp_config_parse(&obj, "afpd") != 0)
afp_exit(EXITERR_CONF);
/* Save the user's current umask */
obj.options.save_mask = umask(obj.options.umask);
/* install child handler for asp and dsi. we do this before afp_goaway
* as afp_goaway references stuff from here.
* XXX: this should really be setup after the initial connections. */
if (!(server_children = server_child_alloc(obj.options.connections, CHILD_NFORKS))) {
LOG(log_error, logtype_afpd, "main: server_child alloc: %s", strerror(errno) );
afp_exit(EXITERR_SYS);
}
sigemptyset(&sigs);
pthread_sigmask(SIG_SETMASK, &sigs, NULL);
memset(&sv, 0, sizeof(sv));
/* linux at least up to 2.4.22 send a SIGXFZ for vfat fs,
even if the file is open with O_LARGEFILE ! */
#ifdef SIGXFSZ
sv.sa_handler = SIG_IGN;
sigemptyset( &sv.sa_mask );
if (sigaction(SIGXFSZ, &sv, NULL ) < 0 ) {
LOG(log_error, logtype_afpd, "main: sigaction: %s", strerror(errno) );
afp_exit(EXITERR_SYS);
}
#endif
sv.sa_handler = afp_goaway; /* handler for all sigs */
sigemptyset( &sv.sa_mask );
sigaddset(&sv.sa_mask, SIGALRM);
sigaddset(&sv.sa_mask, SIGHUP);
sigaddset(&sv.sa_mask, SIGTERM);
sigaddset(&sv.sa_mask, SIGUSR1);
sigaddset(&sv.sa_mask, SIGQUIT);
sv.sa_flags = SA_RESTART;
if ( sigaction( SIGCHLD, &sv, NULL ) < 0 ) {
LOG(log_error, logtype_afpd, "main: sigaction: %s", strerror(errno) );
afp_exit(EXITERR_SYS);
}
sigemptyset( &sv.sa_mask );
sigaddset(&sv.sa_mask, SIGALRM);
sigaddset(&sv.sa_mask, SIGTERM);
sigaddset(&sv.sa_mask, SIGHUP);
sigaddset(&sv.sa_mask, SIGCHLD);
sigaddset(&sv.sa_mask, SIGQUIT);
sv.sa_flags = SA_RESTART;
if ( sigaction( SIGUSR1, &sv, NULL ) < 0 ) {
LOG(log_error, logtype_afpd, "main: sigaction: %s", strerror(errno) );
afp_exit(EXITERR_SYS);
}
sigemptyset( &sv.sa_mask );
sigaddset(&sv.sa_mask, SIGALRM);
sigaddset(&sv.sa_mask, SIGTERM);
sigaddset(&sv.sa_mask, SIGUSR1);
sigaddset(&sv.sa_mask, SIGCHLD);
sigaddset(&sv.sa_mask, SIGQUIT);
sv.sa_flags = SA_RESTART;
if ( sigaction( SIGHUP, &sv, NULL ) < 0 ) {
LOG(log_error, logtype_afpd, "main: sigaction: %s", strerror(errno) );
afp_exit(EXITERR_SYS);
}
sigemptyset( &sv.sa_mask );
sigaddset(&sv.sa_mask, SIGALRM);
sigaddset(&sv.sa_mask, SIGHUP);
sigaddset(&sv.sa_mask, SIGUSR1);
sigaddset(&sv.sa_mask, SIGCHLD);
sigaddset(&sv.sa_mask, SIGQUIT);
sv.sa_flags = SA_RESTART;
if ( sigaction( SIGTERM, &sv, NULL ) < 0 ) {
LOG(log_error, logtype_afpd, "main: sigaction: %s", strerror(errno) );
afp_exit(EXITERR_SYS);
}
sigemptyset( &sv.sa_mask );
sigaddset(&sv.sa_mask, SIGALRM);
sigaddset(&sv.sa_mask, SIGHUP);
sigaddset(&sv.sa_mask, SIGUSR1);
sigaddset(&sv.sa_mask, SIGCHLD);
sigaddset(&sv.sa_mask, SIGTERM);
sv.sa_flags = SA_RESTART;
if (sigaction(SIGQUIT, &sv, NULL ) < 0 ) {
LOG(log_error, logtype_afpd, "main: sigaction: %s", strerror(errno) );
afp_exit(EXITERR_SYS);
}
/* afp.conf: not in config file: lockfile, configfile
* preference: command-line provides defaults.
* config file over-writes defaults.
*
* we also need to make sure that killing afpd during startup
* won't leave any lingering registered names around.
*/
sigemptyset(&sigs);
sigaddset(&sigs, SIGALRM);
sigaddset(&sigs, SIGHUP);
sigaddset(&sigs, SIGUSR1);
#if 0
/* don't block SIGTERM */
sigaddset(&sigs, SIGTERM);
#endif
sigaddset(&sigs, SIGCHLD);
pthread_sigmask(SIG_BLOCK, &sigs, NULL);
if (configinit(&obj) != 0) {
LOG(log_error, logtype_afpd, "main: no servers configured");
afp_exit(EXITERR_CONF);
}
pthread_sigmask(SIG_UNBLOCK, &sigs, NULL);
/* Initialize */
cnid_init();
/* watch atp, dsi sockets and ipc parent/child file descriptor. */
if (obj.options.flags & OPTION_KEEPSESSIONS) {
LOG(log_note, logtype_afpd, "Activating continous service");
disasociated_ipc_fd = ipc_server_uds(_PATH_AFP_IPC);
}
fd_set_listening_sockets(&obj);
/* set limits */
(void)setlimits();
afp_child_t *child;
int recon_ipc_fd;
pid_t pid;
int saveerrno;
/* wait for an appleshare connection. parent remains in the loop
* while the children get handled by afp_over_{asp,dsi}. this is
* currently vulnerable to a denial-of-service attack if a
* connection is made without an actual login attempt being made
* afterwards. establishing timeouts for logins is a possible
* solution. */
while (1) {
LOG(log_maxdebug, logtype_afpd, "main: polling %i fds", fdset_used);
pthread_sigmask(SIG_UNBLOCK, &sigs, NULL);
ret = poll(fdset, fdset_used, -1);
pthread_sigmask(SIG_BLOCK, &sigs, NULL);
saveerrno = errno;
if (gotsigchld) {
gotsigchld = 0;
child_handler();
continue;
}
if (reloadconfig) {
nologin++;
auth_unload();
fd_reset_listening_sockets(&obj);
LOG(log_info, logtype_afpd, "re-reading configuration file");
configfree(&obj, NULL);
if (configinit(&obj) != 0) {
LOG(log_error, logtype_afpd, "config re-read: no servers configured");
afp_exit(EXITERR_CONF);
}
fd_set_listening_sockets(&obj);
nologin = 0;
reloadconfig = 0;
errno = saveerrno;
continue;
}
if (ret == 0)
continue;
if (ret < 0) {
if (errno == EINTR)
continue;
LOG(log_error, logtype_afpd, "main: can't wait for input: %s", strerror(errno));
break;
}
for (int i = 0; i < fdset_used; i++) {
if (fdset[i].revents & (POLLIN | POLLERR | POLLHUP | POLLNVAL)) {
switch (polldata[i].fdtype) {
case LISTEN_FD:
if (child = dsi_start(&obj, (DSI *)polldata[i].data, server_children)) {
/* Add IPC fd to select fd set */
fdset_add_fd(obj.options.connections + AFP_LISTENERS + FDSET_SAFETY,
&fdset,
&polldata,
&fdset_used,
&fdset_size,
child->ipc_fd,
IPC_FD,
child);
}
break;
case IPC_FD:
child = (afp_child_t *)polldata[i].data;
LOG(log_debug, logtype_afpd, "main: IPC request from child[%u]", child->pid);
if (ipc_server_read(server_children, child->ipc_fd) != 0) {
fdset_del_fd(&fdset, &polldata, &fdset_used, &fdset_size, child->ipc_fd);
close(child->ipc_fd);
child->ipc_fd = -1;
if ((obj.options.flags & OPTION_KEEPSESSIONS) && child->disasociated) {
LOG(log_note, logtype_afpd, "main: removing reattached child[%u]", child->pid);
server_child_remove(server_children, CHILD_DSIFORK, child->pid);
}
}
break;
case DISASOCIATED_IPC_FD:
LOG(log_debug, logtype_afpd, "main: IPC reconnect request");
if ((recon_ipc_fd = accept(disasociated_ipc_fd, NULL, NULL)) == -1) {
LOG(log_error, logtype_afpd, "main: accept: %s", strerror(errno));
break;
}
if (readt(recon_ipc_fd, &pid, sizeof(pid_t), 0, 1) != sizeof(pid_t)) {
LOG(log_error, logtype_afpd, "main: readt: %s", strerror(errno));
close(recon_ipc_fd);
break;
}
LOG(log_note, logtype_afpd, "main: IPC reconnect from pid [%u]", pid);
if ((child = server_child_add(server_children, CHILD_DSIFORK, pid, recon_ipc_fd)) == NULL) {
LOG(log_error, logtype_afpd, "main: server_child_add");
close(recon_ipc_fd);
break;
}
child->disasociated = 1;
fdset_add_fd(obj.options.connections + AFP_LISTENERS + FDSET_SAFETY,
&fdset,
&polldata,
&fdset_used,
&fdset_size,
recon_ipc_fd,
IPC_FD,
child);
break;
default:
LOG(log_debug, logtype_afpd, "main: IPC request for unknown type");
break;
} /* switch */
} /* if */
} /* for (i)*/
} /* while (1) */
return 0;
}
static afp_child_t *dsi_start(AFPObj *obj, DSI *dsi, server_child *server_children)
{
afp_child_t *child = NULL;
if (dsi_getsession(dsi, server_children, obj->options.tickleval, &child) != 0) {
LOG(log_error, logtype_afpd, "dsi_start: session error: %s", strerror(errno));
return NULL;
}
/* we've forked. */
if (child == NULL) {
configfree(obj, dsi);
afp_over_dsi(obj); /* start a session */
exit (0);
}
return child;
}
| igorbernstein/netatalk-debian | etc/afpd/main.c | C | gpl-2.0 | 16,185 |
/*
* ircd-hybrid: an advanced Internet Relay Chat Daemon(ircd).
* s_misc.c: Yet another miscellaneous functions file.
*
* Copyright (C) 2002 by the past and present ircd coders, and others.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* $Id: s_misc.c 33 2005-10-02 20:50:00Z knight $
*/
#include "stdinc.h"
#include "s_misc.h"
#include "client.h"
#include "common.h"
#include "irc_string.h"
#include "sprintf_irc.h"
#include "ircd.h"
#include "numeric.h"
#include "irc_res.h"
#include "fdlist.h"
#include "s_bsd.h"
#include "s_conf.h"
#include "s_serv.h"
#include "send.h"
#include "memory.h"
static const char *months[] =
{
"January", "February", "March", "April",
"May", "June", "July", "August",
"September", "October", "November","December"
};
static const char *weekdays[] =
{
"Sunday", "Monday", "Tuesday", "Wednesday",
"Thursday", "Friday", "Saturday"
};
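/*
 * date() - human readable local time string such as
 * "Sunday January 2 2000 -- 03:04:05 +00:00" (illustrative); the trailing
 * field is the local offset from UTC computed below.
 */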
char *
date(time_t lclock)
{
static char buf[80], plus;
struct tm *lt, *gm;
struct tm gmbuf;
int minswest;
if (!lclock)
lclock = CurrentTime;
gm = gmtime(&lclock);
memcpy(&gmbuf, gm, sizeof(gmbuf));
gm = &gmbuf;
lt = localtime(&lclock);
/*
* There is unfortunately no clean portable way to extract time zone
* offset information, so do ugly things.
*/
minswest = (gm->tm_hour - lt->tm_hour) * 60 + (gm->tm_min - lt->tm_min);
if (lt->tm_yday != gm->tm_yday)
{
if ((lt->tm_yday > gm->tm_yday && lt->tm_year == gm->tm_year) ||
(lt->tm_yday < gm->tm_yday && lt->tm_year != gm->tm_year))
minswest -= 24 * 60;
else
minswest += 24 * 60;
}
plus = (minswest > 0) ? '-' : '+';
if (minswest < 0)
minswest = -minswest;
ircsprintf(buf, "%s %s %d %d -- %02u:%02u:%02u %c%02u:%02u",
weekdays[lt->tm_wday], months[lt->tm_mon],lt->tm_mday,
lt->tm_year + 1900, lt->tm_hour, lt->tm_min, lt->tm_sec,
plus, minswest/60, minswest%60);
return buf;
}
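/* Compact local time stamp of the form "YYYY/M/D HH.MM". */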
const char *
smalldate(time_t lclock)
{
static char buf[MAX_DATE_STRING];
struct tm *lt, *gm;
struct tm gmbuf;
if (!lclock)
lclock = CurrentTime;
gm = gmtime(&lclock);
memcpy(&gmbuf, gm, sizeof(gmbuf));
gm = &gmbuf;
lt = localtime(&lclock);
ircsprintf(buf, "%d/%d/%d %02d.%02d",
lt->tm_year + 1900, lt->tm_mon + 1, lt->tm_mday,
lt->tm_hour, lt->tm_min);
return buf;
}
/* small_file_date()
* Make a small YYYYMMDD formatted string suitable for a
* dated file stamp.
*/
char *
small_file_date(time_t lclock)
{
static char timebuffer[MAX_DATE_STRING];
struct tm *tmptr;
if (!lclock)
time(&lclock);
tmptr = localtime(&lclock);
strftime(timebuffer, MAX_DATE_STRING, "%Y%m%d", tmptr);
return timebuffer;
}
#ifdef HAVE_LIBCRYPTO
char *
ssl_get_cipher(SSL *ssl)
{
static char buffer[128];
const char *name = NULL;
int bits;
switch (ssl->session->ssl_version)
{
case SSL2_VERSION:
name = "SSLv2";
break;
case SSL3_VERSION:
name = "SSLv3";
break;
case TLS1_VERSION:
name = "TLSv1";
break;
default:
name = "UNKNOWN";
}
SSL_CIPHER_get_bits(SSL_get_current_cipher(ssl), &bits);
snprintf(buffer, sizeof(buffer), "%s %s-%d",
name, SSL_get_cipher(ssl), bits);
return buffer;
}
#endif
| codemstr/eircd-hybrid | src/s_misc.c | C | gpl-2.0 | 3,996 |
/*
sort.c
Ruby/GSL: Ruby extension library for GSL (GNU Scientific Library)
(C) Copyright 2001-2006 by Yoshiki Tsunesada
Ruby/GSL is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY.
*/
#include <extconf.h>
#include "rb_gsl_array.h"
#include <gsl/gsl_heapsort.h>
#include <gsl/gsl_sort.h>
EXTERN ID RBGSL_ID_call;
EXTERN VALUE cgsl_complex;
int rb_gsl_comparison_double(const void *aa, const void *bb);
int rb_gsl_comparison_complex(const void *aa, const void *bb);
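/* Comparator callbacks handed to gsl_heapsort(): both forward the element
   pair to the Ruby block wrapped by RB_GSL_MAKE_PROC and expect it to return
   -1, 0 or 1, like Ruby's <=> operator. */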
int rb_gsl_comparison_double(const void *aa, const void *bb)
{
double *a = NULL, *b = NULL;
a = (double *) aa;
b = (double *) bb;
return FIX2INT(rb_funcall(RB_GSL_MAKE_PROC, RBGSL_ID_call, 2, rb_float_new(*a), rb_float_new(*b)));
}
int rb_gsl_comparison_complex(const void *aa, const void *bb)
{
gsl_complex *a = NULL, *b = NULL;
a = (gsl_complex *) aa;
b = (gsl_complex *) bb;
return FIX2INT(rb_funcall(RB_GSL_MAKE_PROC, RBGSL_ID_call, 2,
Data_Wrap_Struct(cgsl_complex, 0, NULL, a),
Data_Wrap_Struct(cgsl_complex, 0, NULL, b)));
}
static VALUE rb_gsl_heapsort_vector(VALUE obj)
{
gsl_vector *v = NULL;
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
Data_Get_Struct(obj, gsl_vector, v);
gsl_heapsort(v->data, v->size, sizeof(double), rb_gsl_comparison_double);
return obj;
}
static VALUE rb_gsl_heapsort_vector2(VALUE obj)
{
gsl_vector *v = NULL, *vnew = NULL;
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
Data_Get_Struct(obj, gsl_vector, v);
vnew = gsl_vector_alloc(v->size);
gsl_vector_memcpy(vnew, v);
gsl_heapsort(vnew->data, vnew->size, sizeof(double), rb_gsl_comparison_double);
return Data_Wrap_Struct(cgsl_vector, 0, gsl_vector_free, vnew);
}
static VALUE rb_gsl_heapsort_index_vector(VALUE obj)
{
gsl_vector *v = NULL;
gsl_permutation *p = NULL;
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
Data_Get_Struct(obj, gsl_vector, v);
p = gsl_permutation_alloc(v->size);
gsl_heapsort_index(p->data, v->data, v->size, sizeof(double), rb_gsl_comparison_double);
return Data_Wrap_Struct(cgsl_permutation, 0, gsl_permutation_free, p);
}
static VALUE rb_gsl_heapsort_vector_complex(VALUE obj)
{
gsl_vector_complex *v = NULL;
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
Data_Get_Struct(obj, gsl_vector_complex, v);
gsl_heapsort(v->data, v->size, sizeof(gsl_complex), rb_gsl_comparison_complex);
return obj;
}
static VALUE rb_gsl_heapsort_vector_complex2(VALUE obj)
{
gsl_vector_complex *v = NULL, *vnew = NULL;
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
Data_Get_Struct(obj, gsl_vector_complex, v);
vnew = gsl_vector_complex_alloc(v->size);
gsl_vector_complex_memcpy(vnew, v);
gsl_heapsort(vnew->data, vnew->size, sizeof(gsl_complex), rb_gsl_comparison_complex);
return Data_Wrap_Struct(cgsl_vector_complex, 0, gsl_vector_complex_free, vnew);
}
static VALUE rb_gsl_heapsort_index_vector_complex(VALUE obj)
{
gsl_vector_complex *v = NULL;
gsl_permutation *p = NULL;
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
Data_Get_Struct(obj, gsl_vector_complex, v);
p = gsl_permutation_alloc(v->size);
gsl_heapsort_index(p->data, v->data, v->size, sizeof(gsl_complex), rb_gsl_comparison_complex);
return Data_Wrap_Struct(cgsl_permutation, 0, gsl_permutation_free, p);
}
/* singleton */
static VALUE rb_gsl_heapsort(VALUE obj, VALUE vv)
{
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
if (rb_obj_is_kind_of(vv, cgsl_vector_complex)) {
return rb_gsl_heapsort_vector_complex(vv);
} else if (rb_obj_is_kind_of(vv, cgsl_vector)) {
return rb_gsl_heapsort_vector(vv);
} else {
rb_raise(rb_eTypeError, "wrong argument type %s (Vector or Vector::Complex expected)", rb_class2name(CLASS_OF(vv)));
}
return vv;
}
static VALUE rb_gsl_heapsort2(VALUE obj, VALUE vv)
{
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
if (rb_obj_is_kind_of(vv, cgsl_vector_complex)) {
return rb_gsl_heapsort_vector_complex2(vv);
} else if (rb_obj_is_kind_of(vv, cgsl_vector)) {
return rb_gsl_heapsort_vector2(vv);
} else {
rb_raise(rb_eTypeError, "wrong argument type %s (Vector or Vector::Complex expected)", rb_class2name(CLASS_OF(vv)));
}
return vv;
}
static VALUE rb_gsl_heapsort_index(VALUE obj, VALUE vv)
{
if (!rb_block_given_p()) rb_raise(rb_eRuntimeError, "Proc is not given");
if (rb_obj_is_kind_of(vv, cgsl_vector_complex)) {
return rb_gsl_heapsort_index_vector_complex(vv);
} else if (rb_obj_is_kind_of(vv, cgsl_vector)) {
return rb_gsl_heapsort_index_vector(vv);
} else {
rb_raise(rb_eTypeError, "wrong argument type %s (Vector or Vector::Complex expected)", rb_class2name(CLASS_OF(vv)));
}
return vv;
}
/*****/
#ifdef HAVE_NARRAY_H
#include "narray.h"
static VALUE rb_gsl_sort_narray(VALUE obj)
{
struct NARRAY *na;
size_t size, stride;
double *ptr1, *ptr2;
VALUE ary;
GetNArray(obj, na);
ptr1 = (double*) na->ptr;
size = na->total;
stride = 1;
ary = na_make_object(NA_DFLOAT, na->rank, na->shape, CLASS_OF(obj));
ptr2 = NA_PTR_TYPE(ary, double*);
memcpy(ptr2, ptr1, sizeof(double)*size);
gsl_sort(ptr2, stride, size);
return ary;
}
static VALUE rb_gsl_sort_narray_bang(VALUE obj)
{
struct NARRAY *na;
size_t size, stride;
double *ptr1;
GetNArray(obj, na);
ptr1 = (double*) na->ptr;
size = na->total;
stride = 1;
gsl_sort(ptr1, stride, size);
return obj;
}
static VALUE rb_gsl_sort_index_narray(VALUE obj)
{
struct NARRAY *na;
size_t size, stride;
double *ptr1;
gsl_permutation *p;
GetNArray(obj, na);
ptr1 = (double*) na->ptr;
size = na->total;
stride = 1;
p = gsl_permutation_alloc(size);
gsl_sort_index(p->data, ptr1, stride, size);
return Data_Wrap_Struct(cgsl_permutation, 0, gsl_permutation_free, p);
}
#endif
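/* Register the heapsort methods on GSL::Vector and GSL::Vector::Complex and,
   when NArray support is compiled in, the gsl_sort helpers on NArray.
   Illustrative Ruby usage (block comparator assumed):
     v.heapsort { |a, b| a <=> b }
*/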
void Init_gsl_sort(VALUE module)
{
rb_define_singleton_method(module, "heapsort!", rb_gsl_heapsort, 1);
rb_define_singleton_method(module, "heapsort", rb_gsl_heapsort2, 1);
rb_define_singleton_method(module, "heapsort_index", rb_gsl_heapsort_index, 1);
rb_define_method(cgsl_vector, "heapsort!", rb_gsl_heapsort_vector, 0);
rb_define_method(cgsl_vector, "heapsort", rb_gsl_heapsort_vector2, 0);
rb_define_method(cgsl_vector, "heapsort_index", rb_gsl_heapsort_index_vector, 0);
rb_define_method(cgsl_vector_complex, "heapsort!", rb_gsl_heapsort_vector_complex, 0);
rb_define_method(cgsl_vector_complex, "heapsort", rb_gsl_heapsort_vector_complex2, 0);
rb_define_method(cgsl_vector_complex, "heapsort_index", rb_gsl_heapsort_index_vector_complex, 0);
#ifdef HAVE_NARRAY_H
rb_define_method(cNArray, "gsl_sort", rb_gsl_sort_narray, 0);
rb_define_method(cNArray, "gsl_sort!", rb_gsl_sort_narray_bang, 0);
rb_define_method(cNArray, "gsl_sort_index", rb_gsl_sort_index_narray, 0);
#endif
}
| iliya-gr/rb-gsl | ext/gsl/sort.c | C | gpl-2.0 | 7,112 |
/* frame_data.c
* Routines for packet disassembly
*
* $Id$
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <glib.h>
#include <wiretap/wtap.h>
#include <epan/frame_data.h>
#include <epan/packet.h>
#include <epan/emem.h>
#include <epan/timestamp.h>
/* Protocol-specific data attached to a frame_data structure - protocol
index and opaque pointer. */
typedef struct _frame_proto_data {
int proto;
void *proto_data;
} frame_proto_data;
/* XXX - I declared this static, because it only seems to be used by
* p_get_proto_data and p_add_proto_data
*/
static gint
p_compare(gconstpointer a, gconstpointer b)
{
const frame_proto_data *ap = (const frame_proto_data *)a;
const frame_proto_data *bp = (const frame_proto_data *)b;
if (ap -> proto > bp -> proto)
return 1;
else if (ap -> proto == bp -> proto)
return 0;
else
return -1;
}
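/* Per-frame protocol data: entries are kept in a GSList sorted by protocol
   index so p_get_proto_data()/p_remove_proto_data() can locate them with
   g_slist_find_custom(). */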
void
p_add_proto_data(frame_data *fd, int proto, void *proto_data)
{
frame_proto_data *p1 = se_alloc(sizeof(frame_proto_data));
p1->proto = proto;
p1->proto_data = proto_data;
/* Add it to the GSLIST */
fd -> pfd = g_slist_insert_sorted(fd -> pfd,
(gpointer *)p1,
p_compare);
}
void *
p_get_proto_data(frame_data *fd, int proto)
{
frame_proto_data temp, *p1;
GSList *item;
temp.proto = proto;
temp.proto_data = NULL;
item = g_slist_find_custom(fd->pfd, (gpointer *)&temp, p_compare);
if (item) {
p1 = (frame_proto_data *)item->data;
return p1->proto_data;
}
return NULL;
}
void
p_remove_proto_data(frame_data *fd, int proto)
{
frame_proto_data temp;
GSList *item;
temp.proto = proto;
temp.proto_data = NULL;
item = g_slist_find_custom(fd->pfd, (gpointer *)&temp, p_compare);
if (item) {
fd->pfd = g_slist_remove(fd->pfd, item->data);
}
}
#define COMPARE_FRAME_NUM() ((fdata1->num < fdata2->num) ? -1 : \
(fdata1->num > fdata2->num) ? 1 : \
0)
#define COMPARE_NUM(f) ((fdata1->f < fdata2->f) ? -1 : \
(fdata1->f > fdata2->f) ? 1 : \
COMPARE_FRAME_NUM())
/* Compare time stamps.
A packet whose time is a reference time is considered to have
a lower time stamp than any frame with a non-reference time;
if both packets' times are reference times, we compare the
times of the packets. */
#define COMPARE_TS_REAL(time1, time2) \
((fdata1->flags.ref_time && !fdata2->flags.ref_time) ? -1 : \
(!fdata1->flags.ref_time && fdata2->flags.ref_time) ? 1 : \
((time1).secs < (time2).secs) ? -1 : \
((time1).secs > (time2).secs) ? 1 : \
((time1).nsecs < (time2).nsecs) ? -1 :\
((time1).nsecs > (time2).nsecs) ? 1 : \
COMPARE_FRAME_NUM())
#define COMPARE_TS(ts) COMPARE_TS_REAL(fdata1->ts, fdata2->ts)
void
frame_delta_abs_time(const frame_data *fdata, const frame_data *prev, nstime_t *delta)
{
if (prev) {
nstime_delta(delta, &fdata->abs_ts, &prev->abs_ts);
} else {
/* If we don't have the time stamp of the previous packet,
it's because we have no displayed/captured packets prior to this.
Set the delta time to zero. */
nstime_set_zero(delta);
}
}
static gint
frame_data_time_delta_compare(const frame_data *fdata1, const frame_data *fdata2)
{
nstime_t del_cap_ts1, del_cap_ts2;
frame_delta_abs_time(fdata1, fdata1->prev_cap, &del_cap_ts1);
frame_delta_abs_time(fdata2, fdata2->prev_cap, &del_cap_ts2);
return COMPARE_TS_REAL(del_cap_ts1, del_cap_ts2);
}
static gint
frame_data_time_delta_dis_compare(const frame_data *fdata1, const frame_data *fdata2)
{
nstime_t del_dis_ts1, del_dis_ts2;
frame_delta_abs_time(fdata1, fdata1->prev_dis, &del_dis_ts1);
frame_delta_abs_time(fdata2, fdata2->prev_dis, &del_dis_ts2);
return COMPARE_TS_REAL(del_dis_ts1, del_dis_ts2);
}
gint
frame_data_compare(const frame_data *fdata1, const frame_data *fdata2, int field)
{
switch (field) {
case COL_NUMBER:
return COMPARE_FRAME_NUM();
case COL_CLS_TIME:
switch (timestamp_get_type()) {
case TS_ABSOLUTE:
case TS_ABSOLUTE_WITH_DATE:
case TS_UTC:
case TS_UTC_WITH_DATE:
case TS_EPOCH:
return COMPARE_TS(abs_ts);
case TS_RELATIVE:
return COMPARE_TS(rel_ts);
case TS_DELTA:
return frame_data_time_delta_compare(fdata1, fdata2);
case TS_DELTA_DIS:
return frame_data_time_delta_dis_compare(fdata1, fdata2);
case TS_NOT_SET:
return 0;
}
return 0;
case COL_ABS_TIME:
case COL_ABS_DATE_TIME:
case COL_UTC_TIME:
case COL_UTC_DATE_TIME:
return COMPARE_TS(abs_ts);
case COL_REL_TIME:
return COMPARE_TS(rel_ts);
case COL_DELTA_TIME:
return frame_data_time_delta_compare(fdata1, fdata2);
case COL_DELTA_TIME_DIS:
return frame_data_time_delta_dis_compare(fdata1, fdata2);
case COL_PACKET_LENGTH:
return COMPARE_NUM(pkt_len);
case COL_CUMULATIVE_BYTES:
return COMPARE_NUM(cum_bytes);
}
g_return_val_if_reached(0);
}
void
frame_data_init(frame_data *fdata, guint32 num,
const struct wtap_pkthdr *phdr, gint64 offset,
guint32 cum_bytes)
{
fdata->pfd = NULL;
fdata->num = num;
fdata->interface_id = phdr->interface_id;
fdata->pkt_len = phdr->len;
fdata->cum_bytes = cum_bytes + phdr->len;
fdata->cap_len = phdr->caplen;
fdata->file_off = offset;
fdata->subnum = 0;
/* To save some memory, we coerce it into a gint16 */
g_assert(phdr->pkt_encap <= G_MAXINT16);
fdata->lnk_t = (gint16) phdr->pkt_encap;
fdata->flags.passed_dfilter = 0;
fdata->flags.dependent_of_displayed = 0;
fdata->flags.encoding = PACKET_CHAR_ENC_CHAR_ASCII;
fdata->flags.visited = 0;
fdata->flags.marked = 0;
fdata->flags.ref_time = 0;
fdata->flags.ignored = 0;
fdata->flags.has_ts = (phdr->presence_flags & WTAP_HAS_TS) ? 1 : 0;
fdata->flags.has_if_id = (phdr->presence_flags & WTAP_HAS_INTERFACE_ID) ? 1 : 0;
fdata->color_filter = NULL;
fdata->abs_ts.secs = phdr->ts.secs;
fdata->abs_ts.nsecs = phdr->ts.nsecs;
fdata->shift_offset.secs = 0;
fdata->shift_offset.nsecs = 0;
fdata->rel_ts.secs = 0;
fdata->rel_ts.nsecs = 0;
fdata->prev_dis = NULL;
fdata->prev_cap = NULL;
fdata->opt_comment = phdr->opt_comment;
}
void
frame_data_set_before_dissect(frame_data *fdata,
nstime_t *elapsed_time,
nstime_t *first_ts,
const frame_data *prev_dis,
const frame_data *prev_cap)
{
/* If we don't have the time stamp of the first packet in the
capture, it's because this is the first packet. Save the time
stamp of this packet as the time stamp of the first packet. */
if (nstime_is_unset(first_ts))
*first_ts = fdata->abs_ts;
  /* if this frame is marked as a reference time frame, reset
     first_ts to this frame's time stamp */
if(fdata->flags.ref_time)
*first_ts = fdata->abs_ts;
/* Get the time elapsed between the first packet and this packet. */
nstime_delta(&fdata->rel_ts, &fdata->abs_ts, first_ts);
/* If it's greater than the current elapsed time, set the elapsed time
to it (we check for "greater than" so as not to be confused by
time moving backwards). */
if ((gint32)elapsed_time->secs < fdata->rel_ts.secs
|| ((gint32)elapsed_time->secs == fdata->rel_ts.secs && (gint32)elapsed_time->nsecs < fdata->rel_ts.nsecs)) {
*elapsed_time = fdata->rel_ts;
}
fdata->prev_dis = prev_dis;
fdata->prev_cap = prev_cap;
}
void
frame_data_set_after_dissect(frame_data *fdata,
guint32 *cum_bytes)
{
/* This frame either passed the display filter list or is marked as
a time reference frame. All time reference frames are displayed
     even if they don't pass the display filter */
if(fdata->flags.ref_time){
    /* if this was a TIME REF frame we should reset the cum bytes field */
*cum_bytes = fdata->pkt_len;
fdata->cum_bytes = *cum_bytes;
} else {
/* increase cum_bytes with this packets length */
*cum_bytes += fdata->pkt_len;
fdata->cum_bytes = *cum_bytes;
}
}
void
frame_data_cleanup(frame_data *fdata)
{
if (fdata->pfd) {
g_slist_free(fdata->pfd);
fdata->pfd = NULL;
}
/* XXX, frame_data_cleanup() is called when redissecting (rescan_packets()),
   * which might be triggered by a lot of things, like: a preferences change,
   * setting manual address resolution, etc. (grep for redissect_packets)
* fdata->opt_comment can be set by user, which we must not discard when redissecting.
*/
#if 0
if (fdata->opt_comment) {
g_free(fdata->opt_comment);
fdata->opt_comment = NULL;
}
#endif
}
| sstjohn/wireshark | epan/frame_data.c | C | gpl-2.0 | 9,594 |
/*
* computeOnsetFeatures.c
*
* Code generation for function 'computeOnsetFeatures'
*
* C source code generated on: Fri Apr 25 23:35:45 2014
*
*/
/* Include files */
#include "rt_nonfinite.h"
#include "computeOnsetFeatures_export.h"
/* Type Definitions */
#ifndef struct_emxArray__common
#define struct_emxArray__common
struct emxArray__common
{
void *data;
int32_T *size;
int32_T allocatedSize;
int32_T numDimensions;
boolean_T canFreeData;
};
#endif /*struct_emxArray__common*/
#ifndef typedef_emxArray__common
#define typedef_emxArray__common
typedef struct emxArray__common emxArray__common;
#endif /*typedef_emxArray__common*/
#ifndef struct_emxArray_int32_T
#define struct_emxArray_int32_T
struct emxArray_int32_T
{
int32_T *data;
int32_T *size;
int32_T allocatedSize;
int32_T numDimensions;
boolean_T canFreeData;
};
#endif /*struct_emxArray_int32_T*/
#ifndef typedef_emxArray_int32_T
#define typedef_emxArray_int32_T
typedef struct emxArray_int32_T emxArray_int32_T;
#endif /*typedef_emxArray_int32_T*/
/* Function Declarations */
static void ConstantPad(const emxArray_real_T *a, const real_T padSize[2],
emxArray_real_T *b);
static void b_eml_li_find(const boolean_T x[12], int32_T y_data[12], int32_T
y_size[1]);
static void b_eml_null_assignment(emxArray_boolean_T *x, const emxArray_real_T
*idx);
static void b_emxInit_boolean_T(emxArray_boolean_T **pEmxArray, int32_T
numDimensions);
static void b_emxInit_real_T(emxArray_real_T **pEmxArray, int32_T numDimensions);
static real_T b_std(const real_T varargin_1[17]);
static void bsxfun(const real_T a[17], real_T b, real_T c[17]);
static void c_eml_null_assignment(emxArray_real_T *x, const emxArray_real_T *idx);
static real_T c_std(const real_T varargin_1[17]);
static int32_T div_s32(int32_T numerator, int32_T denominator);
static void eml_li_find(const emxArray_boolean_T *x, emxArray_int32_T *y);
static void eml_null_assignment(emxArray_boolean_T *x);
static void eml_sort(const real_T x[17], real_T y[17], int32_T idx[17]);
static void emxEnsureCapacity(emxArray__common *emxArray, int32_T oldNumel,
int32_T elementSize);
static void emxFree_boolean_T(emxArray_boolean_T **pEmxArray);
static void emxFree_int32_T(emxArray_int32_T **pEmxArray);
static void emxFree_real_T(emxArray_real_T **pEmxArray);
static void emxInit_boolean_T(emxArray_boolean_T **pEmxArray, int32_T
numDimensions);
static void emxInit_int32_T(emxArray_int32_T **pEmxArray, int32_T numDimensions);
static void emxInit_real_T(emxArray_real_T **pEmxArray, int32_T numDimensions);
static real_T featureSpectralCentroid(real_T S[17]);
static real_T featureSpectralCrest(const real_T S[17]);
static void filter(const emxArray_real_T *x, real_T zi, emxArray_real_T *y);
static void filtfilt(const emxArray_real_T *x_in, emxArray_real_T *y_out);
static void histogramFeatures(const real_T ioiHist[17], real_T features[12]);
static void ioiHistogram(emxArray_boolean_T *onsets, const emxArray_real_T *T,
real_T ioiHist[17]);
static void onsetDetection(const emxArray_real_T *spec, emxArray_boolean_T
*onsets, emxArray_real_T *flux);
static void onsetFlux(const emxArray_real_T *S, emxArray_real_T *flux);
static void padarray(const emxArray_real_T *varargin_1, emxArray_real_T *b);
static void rdivide(const emxArray_real_T *x, real_T y, emxArray_real_T *z);
static real_T rt_powd_snf(real_T u0, real_T u1);
/* Function Definitions */
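/* ConstantPad (and the padarray wrapper declared above) are MATLAB Coder
   output for constant padding of an array; the unusual control flow below is
   machine generated, not hand written. */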
static void ConstantPad(const emxArray_real_T *a, const real_T padSize[2],
emxArray_real_T *b)
{
real_T sizeB[2];
int32_T cdiff;
uint32_T varargin_1[2];
int32_T ndbl;
emxArray_real_T *idxB;
emxArray_int32_T *r7;
emxArray_real_T *r8;
emxArray_real_T *r9;
int32_T k;
int32_T absb;
int32_T apnd;
int32_T i4;
emxArray_boolean_T *x;
real_T idxB1;
real_T idxB2;
int32_T b_sizeB[2];
int32_T outsize[2];
for (cdiff = 0; cdiff < 2; cdiff++) {
sizeB[cdiff] = 0.0;
}
for (cdiff = 0; cdiff < 2; cdiff++) {
varargin_1[cdiff] = (uint32_T)a->size[cdiff];
}
ndbl = (int32_T)varargin_1[0];
if ((int32_T)varargin_1[1] > (int32_T)varargin_1[0]) {
ndbl = (int32_T)varargin_1[1];
}
emxInit_real_T(&idxB, 2);
cdiff = idxB->size[0] * idxB->size[1];
idxB->size[0] = ndbl;
idxB->size[1] = 2;
emxEnsureCapacity((emxArray__common *)idxB, cdiff, (int32_T)sizeof(real_T));
ndbl <<= 1;
for (cdiff = 0; cdiff < ndbl; cdiff++) {
idxB->data[cdiff] = 0.0;
}
emxInit_int32_T(&r7, 1);
emxInit_real_T(&r8, 2);
emxInit_real_T(&r9, 2);
for (k = 0; k < 2; k++) {
sizeB[k] = (real_T)a->size[k] + 2.0 * padSize[k];
if (1 > a->size[k]) {
ndbl = 0;
} else {
ndbl = a->size[k];
}
cdiff = r7->size[0];
r7->size[0] = ndbl;
emxEnsureCapacity((emxArray__common *)r7, cdiff, (int32_T)sizeof(int32_T));
for (cdiff = 0; cdiff < ndbl; cdiff++) {
r7->data[cdiff] = cdiff;
}
if (a->size[k] < 1) {
absb = -1;
apnd = 0;
} else {
ndbl = (int32_T)floor(((real_T)a->size[k] - 1.0) + 0.5);
apnd = ndbl + 1;
cdiff = (ndbl - a->size[k]) + 1;
absb = a->size[k];
if (1 > absb) {
i4 = 1;
} else {
i4 = absb;
}
if (fabs(cdiff) < 4.4408920985006262E-16 * (real_T)i4) {
ndbl++;
apnd = a->size[k];
} else if (cdiff > 0) {
apnd = ndbl;
} else {
ndbl++;
}
absb = ndbl - 1;
}
cdiff = r8->size[0] * r8->size[1];
r8->size[0] = 1;
r8->size[1] = absb + 1;
emxEnsureCapacity((emxArray__common *)r8, cdiff, (int32_T)sizeof(real_T));
if (absb + 1 > 0) {
r8->data[0] = 1.0;
if (absb + 1 > 1) {
r8->data[absb] = apnd;
ndbl = absb / 2;
for (cdiff = 1; cdiff < ndbl; cdiff++) {
r8->data[cdiff] = 1.0 + (real_T)cdiff;
r8->data[absb - cdiff] = apnd - cdiff;
}
if (ndbl << 1 == absb) {
r8->data[ndbl] = (1.0 + (real_T)apnd) / 2.0;
} else {
r8->data[ndbl] = 1.0 + (real_T)ndbl;
r8->data[ndbl + 1] = apnd - ndbl;
}
}
}
cdiff = r9->size[0] * r9->size[1];
r9->size[0] = 1;
r9->size[1] = r8->size[1];
emxEnsureCapacity((emxArray__common *)r9, cdiff, (int32_T)sizeof(real_T));
ndbl = r8->size[1];
for (cdiff = 0; cdiff < ndbl; cdiff++) {
r9->data[r9->size[0] * cdiff] = r8->data[r8->size[0] * cdiff] + padSize[k];
}
ndbl = r7->size[0];
for (cdiff = 0; cdiff < ndbl; cdiff++) {
idxB->data[r7->data[cdiff] + idxB->size[0] * k] = r9->data[cdiff];
}
}
emxFree_real_T(&r9);
emxFree_real_T(&r8);
emxFree_int32_T(&r7);
b_emxInit_boolean_T(&x, 1);
ndbl = idxB->size[0];
cdiff = x->size[0];
x->size[0] = ndbl;
emxEnsureCapacity((emxArray__common *)x, cdiff, (int32_T)sizeof(boolean_T));
for (cdiff = 0; cdiff < ndbl; cdiff++) {
x->data[cdiff] = (idxB->data[cdiff] != 0.0);
}
if (x->size[0] == 0) {
idxB1 = 0.0;
} else {
idxB1 = x->data[0];
for (k = 2; k <= x->size[0]; k++) {
idxB1 += (real_T)x->data[k - 1];
}
}
ndbl = idxB->size[0];
cdiff = x->size[0];
x->size[0] = ndbl;
emxEnsureCapacity((emxArray__common *)x, cdiff, (int32_T)sizeof(boolean_T));
for (cdiff = 0; cdiff < ndbl; cdiff++) {
x->data[cdiff] = (idxB->data[cdiff + idxB->size[0]] != 0.0);
}
if (x->size[0] == 0) {
idxB2 = 0.0;
} else {
idxB2 = x->data[0];
for (k = 2; k <= x->size[0]; k++) {
idxB2 += (real_T)x->data[k - 1];
}
}
emxFree_boolean_T(&x);
b_sizeB[0] = (int32_T)sizeB[0];
b_sizeB[1] = (int32_T)sizeB[1];
for (cdiff = 0; cdiff < 2; cdiff++) {
outsize[cdiff] = b_sizeB[cdiff];
}
cdiff = b->size[0] * b->size[1];
b->size[0] = outsize[0];
emxEnsureCapacity((emxArray__common *)b, cdiff, (int32_T)sizeof(real_T));
cdiff = b->size[0] * b->size[1];
b->size[1] = outsize[1];
emxEnsureCapacity((emxArray__common *)b, cdiff, (int32_T)sizeof(real_T));
ndbl = outsize[0] * outsize[1];
for (cdiff = 0; cdiff < ndbl; cdiff++) {
b->data[cdiff] = 0.0;
}
for (ndbl = 0; ndbl < (int32_T)idxB1; ndbl++) {
for (cdiff = 0; cdiff < (int32_T)idxB2; cdiff++) {
b->data[((int32_T)idxB->data[(int32_T)(1.0 + (real_T)ndbl) - 1] + b->size
[0] * ((int32_T)idxB->data[((int32_T)(1.0 + (real_T)cdiff) +
idxB->size[0]) - 1] - 1)) - 1] = a->data[((int32_T)(1.0 +
(real_T)ndbl) + a->size[0] * ((int32_T)(1.0 + (real_T)cdiff) - 1)) - 1];
}
}
emxFree_real_T(&idxB);
}
static void b_eml_li_find(const boolean_T x[12], int32_T y_data[12], int32_T
y_size[1])
{
int32_T k;
int32_T i;
k = 0;
for (i = 0; i < 12; i++) {
if (x[i]) {
k++;
}
}
y_size[0] = k;
k = 0;
for (i = 0; i < 12; i++) {
if (x[i]) {
y_data[k] = i + 1;
k++;
}
}
}
static void b_eml_null_assignment(emxArray_boolean_T *x, const emxArray_real_T
*idx)
{
int32_T nxin;
int32_T k;
emxArray_int32_T *r13;
emxArray_boolean_T *b_x;
emxArray_boolean_T *c_x;
int32_T nxout;
int32_T i6;
int32_T k0;
emxArray_boolean_T *b;
nxin = x->size[0] * x->size[1];
if (idx->size[1] == 1) {
for (k = (int32_T)idx->data[0]; k < nxin; k++) {
x->data[k - 1] = x->data[k];
}
emxInit_int32_T(&r13, 1);
emxInit_boolean_T(&b_x, 2);
b_emxInit_boolean_T(&c_x, 1);
if ((x->size[0] != 1) && (x->size[1] == 1)) {
if (1 > nxin - 1) {
nxout = 0;
} else {
nxout = nxin - 1;
}
i6 = c_x->size[0];
c_x->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)c_x, i6, (int32_T)sizeof(boolean_T));
for (i6 = 0; i6 < nxout; i6++) {
c_x->data[i6] = x->data[i6];
}
i6 = x->size[0] * x->size[1];
x->size[0] = nxout;
x->size[1] = 1;
emxEnsureCapacity((emxArray__common *)x, i6, (int32_T)sizeof(boolean_T));
i6 = 0;
while (i6 <= 0) {
for (i6 = 0; i6 < nxout; i6++) {
x->data[i6] = c_x->data[i6];
}
i6 = 1;
}
} else {
if (1 > nxin - 1) {
nxout = 0;
} else {
nxout = nxin - 1;
}
i6 = r13->size[0];
r13->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)r13, i6, (int32_T)sizeof(int32_T));
for (i6 = 0; i6 < nxout; i6++) {
r13->data[i6] = 1 + i6;
}
nxout = r13->size[0];
i6 = b_x->size[0] * b_x->size[1];
b_x->size[0] = 1;
b_x->size[1] = nxout;
emxEnsureCapacity((emxArray__common *)b_x, i6, (int32_T)sizeof(boolean_T));
for (i6 = 0; i6 < nxout; i6++) {
k = 0;
while (k <= 0) {
b_x->data[b_x->size[0] * i6] = x->data[r13->data[i6] - 1];
k = 1;
}
}
i6 = x->size[0] * x->size[1];
x->size[0] = b_x->size[0];
x->size[1] = b_x->size[1];
emxEnsureCapacity((emxArray__common *)x, i6, (int32_T)sizeof(boolean_T));
nxout = b_x->size[1];
for (i6 = 0; i6 < nxout; i6++) {
k0 = b_x->size[0];
for (k = 0; k < k0; k++) {
x->data[k + x->size[0] * i6] = b_x->data[k + b_x->size[0] * i6];
}
}
}
emxFree_boolean_T(&c_x);
emxFree_boolean_T(&b_x);
emxFree_int32_T(&r13);
} else {
emxInit_boolean_T(&b, 2);
i6 = b->size[0] * b->size[1];
b->size[0] = 1;
b->size[1] = nxin;
emxEnsureCapacity((emxArray__common *)b, i6, (int32_T)sizeof(boolean_T));
for (i6 = 0; i6 < nxin; i6++) {
b->data[i6] = FALSE;
}
for (k = 1; k <= idx->size[1]; k++) {
b->data[(int32_T)idx->data[k - 1] - 1] = TRUE;
}
nxout = 0;
for (k = 1; k <= b->size[1]; k++) {
nxout += b->data[k - 1];
}
nxout = nxin - nxout;
k0 = -1;
for (k = 1; k <= nxin; k++) {
if ((k > b->size[1]) || (!b->data[k - 1])) {
k0++;
x->data[k0] = x->data[k - 1];
}
}
emxFree_boolean_T(&b);
emxInit_int32_T(&r13, 1);
emxInit_boolean_T(&b_x, 2);
b_emxInit_boolean_T(&c_x, 1);
if ((x->size[0] != 1) && (x->size[1] == 1)) {
if (1 > nxout) {
nxout = 0;
}
i6 = c_x->size[0];
c_x->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)c_x, i6, (int32_T)sizeof(boolean_T));
for (i6 = 0; i6 < nxout; i6++) {
c_x->data[i6] = x->data[i6];
}
i6 = x->size[0] * x->size[1];
x->size[0] = nxout;
x->size[1] = 1;
emxEnsureCapacity((emxArray__common *)x, i6, (int32_T)sizeof(boolean_T));
i6 = 0;
while (i6 <= 0) {
for (i6 = 0; i6 < nxout; i6++) {
x->data[i6] = c_x->data[i6];
}
i6 = 1;
}
} else {
if (1 > nxout) {
nxout = 0;
}
i6 = r13->size[0];
r13->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)r13, i6, (int32_T)sizeof(int32_T));
for (i6 = 0; i6 < nxout; i6++) {
r13->data[i6] = 1 + i6;
}
nxout = r13->size[0];
i6 = b_x->size[0] * b_x->size[1];
b_x->size[0] = 1;
b_x->size[1] = nxout;
emxEnsureCapacity((emxArray__common *)b_x, i6, (int32_T)sizeof(boolean_T));
for (i6 = 0; i6 < nxout; i6++) {
k = 0;
while (k <= 0) {
b_x->data[b_x->size[0] * i6] = x->data[r13->data[i6] - 1];
k = 1;
}
}
i6 = x->size[0] * x->size[1];
x->size[0] = b_x->size[0];
x->size[1] = b_x->size[1];
emxEnsureCapacity((emxArray__common *)x, i6, (int32_T)sizeof(boolean_T));
nxout = b_x->size[1];
for (i6 = 0; i6 < nxout; i6++) {
k0 = b_x->size[0];
for (k = 0; k < k0; k++) {
x->data[k + x->size[0] * i6] = b_x->data[k + b_x->size[0] * i6];
}
}
}
emxFree_boolean_T(&c_x);
emxFree_boolean_T(&b_x);
emxFree_int32_T(&r13);
}
}
static void b_emxInit_boolean_T(emxArray_boolean_T **pEmxArray, int32_T
numDimensions)
{
emxArray_boolean_T *emxArray;
int32_T i;
*pEmxArray = (emxArray_boolean_T *)malloc(sizeof(emxArray_boolean_T));
emxArray = *pEmxArray;
emxArray->data = (boolean_T *)NULL;
emxArray->numDimensions = numDimensions;
emxArray->size = (int32_T *)malloc((uint32_T)(sizeof(int32_T) * numDimensions));
emxArray->allocatedSize = 0;
emxArray->canFreeData = TRUE;
for (i = 0; i < numDimensions; i++) {
emxArray->size[i] = 0;
}
}
static void b_emxInit_real_T(emxArray_real_T **pEmxArray, int32_T numDimensions)
{
emxArray_real_T *emxArray;
int32_T i;
*pEmxArray = (emxArray_real_T *)malloc(sizeof(emxArray_real_T));
emxArray = *pEmxArray;
emxArray->data = (real_T *)NULL;
emxArray->numDimensions = numDimensions;
emxArray->size = (int32_T *)malloc((uint32_T)(sizeof(int32_T) * numDimensions));
emxArray->allocatedSize = 0;
emxArray->canFreeData = TRUE;
for (i = 0; i < numDimensions; i++) {
emxArray->size[i] = 0;
}
}
static real_T b_std(const real_T varargin_1[17])
{
real_T y;
int32_T ix;
real_T xbar;
int32_T k;
real_T r;
ix = 0;
xbar = varargin_1[0];
for (k = 0; k < 16; k++) {
ix++;
xbar += varargin_1[ix];
}
xbar /= 17.0;
ix = 0;
r = varargin_1[0] - xbar;
y = r * r;
for (k = 0; k < 16; k++) {
ix++;
r = varargin_1[ix] - xbar;
y += r * r;
}
y /= 16.0;
return sqrt(y);
}
static void bsxfun(const real_T a[17], real_T b, real_T c[17])
{
int32_T k;
for (k = 0; k < 17; k++) {
c[k] = a[k] - b;
}
}
static void c_eml_null_assignment(emxArray_real_T *x, const emxArray_real_T *idx)
{
int32_T nxin;
int32_T k;
emxArray_int32_T *r14;
emxArray_real_T *b_x;
emxArray_real_T *c_x;
int32_T nxout;
int32_T i7;
int32_T k0;
emxArray_boolean_T *b;
nxin = x->size[0] * x->size[1];
if (idx->size[1] == 1) {
for (k = (int32_T)idx->data[0]; k < nxin; k++) {
x->data[k - 1] = x->data[k];
}
emxInit_int32_T(&r14, 1);
emxInit_real_T(&b_x, 2);
b_emxInit_real_T(&c_x, 1);
if ((x->size[0] != 1) && (x->size[1] == 1)) {
if (1 > nxin - 1) {
nxout = 0;
} else {
nxout = nxin - 1;
}
i7 = c_x->size[0];
c_x->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)c_x, i7, (int32_T)sizeof(real_T));
for (i7 = 0; i7 < nxout; i7++) {
c_x->data[i7] = x->data[i7];
}
i7 = x->size[0] * x->size[1];
x->size[0] = nxout;
x->size[1] = 1;
emxEnsureCapacity((emxArray__common *)x, i7, (int32_T)sizeof(real_T));
i7 = 0;
while (i7 <= 0) {
for (i7 = 0; i7 < nxout; i7++) {
x->data[i7] = c_x->data[i7];
}
i7 = 1;
}
} else {
if (1 > nxin - 1) {
nxout = 0;
} else {
nxout = nxin - 1;
}
i7 = r14->size[0];
r14->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)r14, i7, (int32_T)sizeof(int32_T));
for (i7 = 0; i7 < nxout; i7++) {
r14->data[i7] = 1 + i7;
}
nxout = r14->size[0];
i7 = b_x->size[0] * b_x->size[1];
b_x->size[0] = 1;
b_x->size[1] = nxout;
emxEnsureCapacity((emxArray__common *)b_x, i7, (int32_T)sizeof(real_T));
for (i7 = 0; i7 < nxout; i7++) {
k = 0;
while (k <= 0) {
b_x->data[b_x->size[0] * i7] = x->data[r14->data[i7] - 1];
k = 1;
}
}
i7 = x->size[0] * x->size[1];
x->size[0] = b_x->size[0];
x->size[1] = b_x->size[1];
emxEnsureCapacity((emxArray__common *)x, i7, (int32_T)sizeof(real_T));
nxout = b_x->size[1];
for (i7 = 0; i7 < nxout; i7++) {
k0 = b_x->size[0];
for (k = 0; k < k0; k++) {
x->data[k + x->size[0] * i7] = b_x->data[k + b_x->size[0] * i7];
}
}
}
emxFree_real_T(&c_x);
emxFree_real_T(&b_x);
emxFree_int32_T(&r14);
} else {
emxInit_boolean_T(&b, 2);
i7 = b->size[0] * b->size[1];
b->size[0] = 1;
b->size[1] = nxin;
emxEnsureCapacity((emxArray__common *)b, i7, (int32_T)sizeof(boolean_T));
for (i7 = 0; i7 < nxin; i7++) {
b->data[i7] = FALSE;
}
for (k = 1; k <= idx->size[1]; k++) {
b->data[(int32_T)idx->data[k - 1] - 1] = TRUE;
}
nxout = 0;
for (k = 1; k <= b->size[1]; k++) {
nxout += b->data[k - 1];
}
nxout = nxin - nxout;
k0 = -1;
for (k = 1; k <= nxin; k++) {
if ((k > b->size[1]) || (!b->data[k - 1])) {
k0++;
x->data[k0] = x->data[k - 1];
}
}
emxFree_boolean_T(&b);
emxInit_int32_T(&r14, 1);
emxInit_real_T(&b_x, 2);
b_emxInit_real_T(&c_x, 1);
if ((x->size[0] != 1) && (x->size[1] == 1)) {
if (1 > nxout) {
nxout = 0;
}
i7 = c_x->size[0];
c_x->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)c_x, i7, (int32_T)sizeof(real_T));
for (i7 = 0; i7 < nxout; i7++) {
c_x->data[i7] = x->data[i7];
}
i7 = x->size[0] * x->size[1];
x->size[0] = nxout;
x->size[1] = 1;
emxEnsureCapacity((emxArray__common *)x, i7, (int32_T)sizeof(real_T));
i7 = 0;
while (i7 <= 0) {
for (i7 = 0; i7 < nxout; i7++) {
x->data[i7] = c_x->data[i7];
}
i7 = 1;
}
} else {
if (1 > nxout) {
nxout = 0;
}
i7 = r14->size[0];
r14->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)r14, i7, (int32_T)sizeof(int32_T));
for (i7 = 0; i7 < nxout; i7++) {
r14->data[i7] = 1 + i7;
}
nxout = r14->size[0];
i7 = b_x->size[0] * b_x->size[1];
b_x->size[0] = 1;
b_x->size[1] = nxout;
emxEnsureCapacity((emxArray__common *)b_x, i7, (int32_T)sizeof(real_T));
for (i7 = 0; i7 < nxout; i7++) {
k = 0;
while (k <= 0) {
b_x->data[b_x->size[0] * i7] = x->data[r14->data[i7] - 1];
k = 1;
}
}
i7 = x->size[0] * x->size[1];
x->size[0] = b_x->size[0];
x->size[1] = b_x->size[1];
emxEnsureCapacity((emxArray__common *)x, i7, (int32_T)sizeof(real_T));
nxout = b_x->size[1];
for (i7 = 0; i7 < nxout; i7++) {
k0 = b_x->size[0];
for (k = 0; k < k0; k++) {
x->data[k + x->size[0] * i7] = b_x->data[k + b_x->size[0] * i7];
}
}
}
emxFree_real_T(&c_x);
emxFree_real_T(&b_x);
emxFree_int32_T(&r14);
}
}
static real_T c_std(const real_T varargin_1[17])
{
real_T y;
int32_T ix;
real_T xbar;
int32_T k;
real_T r;
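  /* Note: unlike b_std() above, which normalizes by N-1 = 16 (sample std), */
  /* this variant normalizes by N = 17, i.e. the population standard deviation. */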
ix = 0;
xbar = varargin_1[0];
for (k = 0; k < 16; k++) {
ix++;
xbar += varargin_1[ix];
}
xbar /= 17.0;
ix = 0;
r = varargin_1[0] - xbar;
y = r * r;
for (k = 0; k < 16; k++) {
ix++;
r = varargin_1[ix] - xbar;
y += r * r;
}
y /= 17.0;
return sqrt(y);
}
static int32_T div_s32(int32_T numerator, int32_T denominator)
{
int32_T quotient;
uint32_T absNumerator;
uint32_T absDenominator;
int32_T quotientNeedsNegation;
if (denominator == 0) {
if (numerator >= 0) {
quotient = MAX_int32_T;
} else {
quotient = MIN_int32_T;
}
} else {
if (numerator >= 0) {
absNumerator = (uint32_T)numerator;
} else {
absNumerator = (uint32_T)-numerator;
}
if (denominator >= 0) {
absDenominator = (uint32_T)denominator;
} else {
absDenominator = (uint32_T)-denominator;
}
quotientNeedsNegation = ((numerator < 0) != (denominator < 0));
absNumerator /= absDenominator;
if ((uint32_T)quotientNeedsNegation) {
quotient = -(int32_T)absNumerator;
} else {
quotient = (int32_T)absNumerator;
}
}
return quotient;
}
static void eml_li_find(const emxArray_boolean_T *x, emxArray_int32_T *y)
{
int32_T n;
int32_T k;
int32_T i;
int32_T j;
n = x->size[0] * x->size[1];
k = 0;
for (i = 1; i <= n; i++) {
if (x->data[i - 1]) {
k++;
}
}
j = y->size[0];
y->size[0] = k;
emxEnsureCapacity((emxArray__common *)y, j, (int32_T)sizeof(int32_T));
j = 0;
for (i = 1; i <= n; i++) {
if (x->data[i - 1]) {
y->data[j] = i;
j++;
}
}
}
static void eml_null_assignment(emxArray_boolean_T *x)
{
emxArray_boolean_T *b;
int32_T nxin;
int32_T i5;
int32_T k;
int32_T nxout;
int32_T k0;
emxArray_int32_T *r12;
emxArray_boolean_T *b_x;
emxArray_boolean_T *c_x;
emxInit_boolean_T(&b, 2);
nxin = x->size[0] * x->size[1];
i5 = b->size[0] * b->size[1];
b->size[0] = 1;
b->size[1] = nxin;
emxEnsureCapacity((emxArray__common *)b, i5, (int32_T)sizeof(boolean_T));
for (i5 = 0; i5 < nxin; i5++) {
b->data[i5] = FALSE;
}
for (k = 0; k < 2; k++) {
b->data[k] = TRUE;
}
nxout = 0;
for (k = 1; k <= b->size[1]; k++) {
nxout += b->data[k - 1];
}
nxout = nxin - nxout;
k0 = -1;
for (k = 1; k <= nxin; k++) {
if ((k > b->size[1]) || (!b->data[k - 1])) {
k0++;
x->data[k0] = x->data[k - 1];
}
}
emxFree_boolean_T(&b);
emxInit_int32_T(&r12, 1);
emxInit_boolean_T(&b_x, 2);
b_emxInit_boolean_T(&c_x, 1);
if ((x->size[0] != 1) && (x->size[1] == 1)) {
if (1 > nxout) {
nxout = 0;
}
i5 = c_x->size[0];
c_x->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)c_x, i5, (int32_T)sizeof(boolean_T));
for (i5 = 0; i5 < nxout; i5++) {
c_x->data[i5] = x->data[i5];
}
i5 = x->size[0] * x->size[1];
x->size[0] = nxout;
x->size[1] = 1;
emxEnsureCapacity((emxArray__common *)x, i5, (int32_T)sizeof(boolean_T));
i5 = 0;
while (i5 <= 0) {
for (i5 = 0; i5 < nxout; i5++) {
x->data[i5] = c_x->data[i5];
}
i5 = 1;
}
} else {
if (1 > nxout) {
nxout = 0;
}
i5 = r12->size[0];
r12->size[0] = nxout;
emxEnsureCapacity((emxArray__common *)r12, i5, (int32_T)sizeof(int32_T));
for (i5 = 0; i5 < nxout; i5++) {
r12->data[i5] = 1 + i5;
}
nxout = r12->size[0];
i5 = b_x->size[0] * b_x->size[1];
b_x->size[0] = 1;
b_x->size[1] = nxout;
emxEnsureCapacity((emxArray__common *)b_x, i5, (int32_T)sizeof(boolean_T));
for (i5 = 0; i5 < nxout; i5++) {
k = 0;
while (k <= 0) {
b_x->data[b_x->size[0] * i5] = x->data[r12->data[i5] - 1];
k = 1;
}
}
i5 = x->size[0] * x->size[1];
x->size[0] = b_x->size[0];
x->size[1] = b_x->size[1];
emxEnsureCapacity((emxArray__common *)x, i5, (int32_T)sizeof(boolean_T));
nxout = b_x->size[1];
for (i5 = 0; i5 < nxout; i5++) {
k0 = b_x->size[0];
for (k = 0; k < k0; k++) {
x->data[k + x->size[0] * i5] = b_x->data[k + b_x->size[0] * i5];
}
}
}
emxFree_boolean_T(&c_x);
emxFree_boolean_T(&b_x);
emxFree_int32_T(&r12);
}
static void eml_sort(const real_T x[17], real_T y[17], int32_T idx[17])
{
int32_T k;
boolean_T p;
int8_T idx0[17];
int32_T i;
int32_T i2;
int32_T j;
int32_T pEnd;
int32_T b_p;
int32_T q;
int32_T qEnd;
int32_T kEnd;
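  /* Stable bottom-up merge sort: returns the values of x in ascending order */
  /* in y and their original 1-based positions in idx, corresponding to      */
  /* MATLAB's [y, idx] = sort(x) (NaNs are ordered last).                    */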
for (k = 0; k < 17; k++) {
idx[k] = k + 1;
}
for (k = 0; k < 15; k += 2) {
if ((x[k] <= x[k + 1]) || rtIsNaN(x[k + 1])) {
p = TRUE;
} else {
p = FALSE;
}
if (p) {
} else {
idx[k] = k + 2;
idx[k + 1] = k + 1;
}
}
for (i = 0; i < 17; i++) {
idx0[i] = 1;
}
i = 2;
while (i < 17) {
i2 = i << 1;
j = 1;
for (pEnd = 1 + i; pEnd < 18; pEnd = qEnd + i) {
b_p = j;
q = pEnd - 1;
qEnd = j + i2;
if (qEnd > 18) {
qEnd = 18;
}
k = 0;
kEnd = qEnd - j;
while (k + 1 <= kEnd) {
if ((x[idx[b_p - 1] - 1] <= x[idx[q] - 1]) || rtIsNaN(x[idx[q] - 1])) {
p = TRUE;
} else {
p = FALSE;
}
if (p) {
idx0[k] = (int8_T)idx[b_p - 1];
b_p++;
if (b_p == pEnd) {
while (q + 1 < qEnd) {
k++;
idx0[k] = (int8_T)idx[q];
q++;
}
}
} else {
idx0[k] = (int8_T)idx[q];
q++;
if (q + 1 == qEnd) {
while (b_p < pEnd) {
k++;
idx0[k] = (int8_T)idx[b_p - 1];
b_p++;
}
}
}
k++;
}
for (k = 0; k + 1 <= kEnd; k++) {
idx[(j + k) - 1] = idx0[k];
}
j = qEnd;
}
i = i2;
}
for (k = 0; k < 17; k++) {
y[k] = x[idx[k] - 1];
}
}
static void emxEnsureCapacity(emxArray__common *emxArray, int32_T oldNumel,
int32_T elementSize)
{
int32_T newNumel;
int32_T i;
void *newData;
newNumel = 1;
for (i = 0; i < emxArray->numDimensions; i++) {
newNumel *= emxArray->size[i];
}
if (newNumel > emxArray->allocatedSize) {
i = emxArray->allocatedSize;
if (i < 16) {
i = 16;
}
while (i < newNumel) {
i <<= 1;
}
newData = calloc((uint32_T)i, (uint32_T)elementSize);
if (emxArray->data != NULL) {
memcpy(newData, emxArray->data, (uint32_T)(elementSize * oldNumel));
if (emxArray->canFreeData) {
free(emxArray->data);
}
}
emxArray->data = newData;
emxArray->allocatedSize = i;
emxArray->canFreeData = TRUE;
}
}
static void emxFree_boolean_T(emxArray_boolean_T **pEmxArray)
{
if (*pEmxArray != (emxArray_boolean_T *)NULL) {
if ((*pEmxArray)->canFreeData) {
free((void *)(*pEmxArray)->data);
}
free((void *)(*pEmxArray)->size);
free((void *)*pEmxArray);
*pEmxArray = (emxArray_boolean_T *)NULL;
}
}
static void emxFree_int32_T(emxArray_int32_T **pEmxArray)
{
if (*pEmxArray != (emxArray_int32_T *)NULL) {
if ((*pEmxArray)->canFreeData) {
free((void *)(*pEmxArray)->data);
}
free((void *)(*pEmxArray)->size);
free((void *)*pEmxArray);
*pEmxArray = (emxArray_int32_T *)NULL;
}
}
static void emxFree_real_T(emxArray_real_T **pEmxArray)
{
if (*pEmxArray != (emxArray_real_T *)NULL) {
if ((*pEmxArray)->canFreeData) {
free((void *)(*pEmxArray)->data);
}
free((void *)(*pEmxArray)->size);
free((void *)*pEmxArray);
*pEmxArray = (emxArray_real_T *)NULL;
}
}
static void emxInit_boolean_T(emxArray_boolean_T **pEmxArray, int32_T
numDimensions)
{
emxArray_boolean_T *emxArray;
int32_T i;
*pEmxArray = (emxArray_boolean_T *)malloc(sizeof(emxArray_boolean_T));
emxArray = *pEmxArray;
emxArray->data = (boolean_T *)NULL;
emxArray->numDimensions = numDimensions;
emxArray->size = (int32_T *)malloc((uint32_T)(sizeof(int32_T) * numDimensions));
emxArray->allocatedSize = 0;
emxArray->canFreeData = TRUE;
for (i = 0; i < numDimensions; i++) {
emxArray->size[i] = 0;
}
}
static void emxInit_int32_T(emxArray_int32_T **pEmxArray, int32_T numDimensions)
{
emxArray_int32_T *emxArray;
int32_T i;
*pEmxArray = (emxArray_int32_T *)malloc(sizeof(emxArray_int32_T));
emxArray = *pEmxArray;
emxArray->data = (int32_T *)NULL;
emxArray->numDimensions = numDimensions;
emxArray->size = (int32_T *)malloc((uint32_T)(sizeof(int32_T) * numDimensions));
emxArray->allocatedSize = 0;
emxArray->canFreeData = TRUE;
for (i = 0; i < numDimensions; i++) {
emxArray->size[i] = 0;
}
}
static void emxInit_real_T(emxArray_real_T **pEmxArray, int32_T numDimensions)
{
emxArray_real_T *emxArray;
int32_T i;
*pEmxArray = (emxArray_real_T *)malloc(sizeof(emxArray_real_T));
emxArray = *pEmxArray;
emxArray->data = (real_T *)NULL;
emxArray->numDimensions = numDimensions;
emxArray->size = (int32_T *)malloc((uint32_T)(sizeof(int32_T) * numDimensions));
emxArray->allocatedSize = 0;
emxArray->canFreeData = TRUE;
for (i = 0; i < numDimensions; i++) {
emxArray->size[i] = 0;
}
}
static real_T featureSpectralCentroid(real_T S[17])
{
real_T y;
int32_T i;
real_T b_y;
/* FEATURESPECTRALCENTROID Computes spectral centroid feature */
  /* It is the center of mass of the spectrum */
/* [r,c] = size(S); */
/* feature = sum(repmat((1:r)',1,c).* S)./sum(S); */
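  /* As generated here, the centroid is taken over the squared histogram     */
  /* values: centroid = sum(k .* S.^2) / sum(S.^2), with bin index k         */
  /* starting at 0 (note that S is squared in place as a side effect).       */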
y = 0.0;
for (i = 0; i < 17; i++) {
b_y = S[i] * S[i];
y += (((real_T)i + 1.0) - 1.0) * b_y;
S[i] = b_y;
}
b_y = S[0];
for (i = 0; i < 16; i++) {
b_y += S[i + 1];
}
return y / b_y;
}
static real_T featureSpectralCrest(const real_T S[17])
{
int32_T ixstart;
real_T mtmp;
int32_T ix;
boolean_T exitg1;
real_T y;
/* FEATURESPECTRALCREST Computes spectral crest */
/* It is a rough measure of tonality */
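  /* crest = max(S) / sum(S): a flat spectrum gives 1/17, a single peak ~1 */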
ixstart = 1;
mtmp = S[0];
if (rtIsNaN(S[0])) {
ix = 2;
exitg1 = FALSE;
while ((exitg1 == FALSE) && (ix < 18)) {
ixstart = ix;
if (!rtIsNaN(S[ix - 1])) {
mtmp = S[ix - 1];
exitg1 = TRUE;
} else {
ix++;
}
}
}
if (ixstart < 17) {
while (ixstart + 1 < 18) {
if (S[ixstart] > mtmp) {
mtmp = S[ixstart];
}
ixstart++;
}
}
y = S[0];
for (ixstart = 0; ixstart < 16; ixstart++) {
y += S[ixstart + 1];
}
return mtmp / y;
}
static void filter(const emxArray_real_T *x, real_T zi, emxArray_real_T *y)
{
uint32_T unnamed_idx_0;
int32_T j;
real_T dbuffer[2];
int32_T k;
real_T b_dbuffer;
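  /* Short FIR smoother used by filtfilt(): with the 2-element state buffer */
  /* below, y[n] = (x[n] + x[n-1]) / 3 for n >= 1 and y[0] = zi + x[0] / 3, */
  /* where zi is the supplied initial filter state.                         */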
unnamed_idx_0 = (uint32_T)x->size[0];
j = y->size[0];
y->size[0] = (int32_T)unnamed_idx_0;
emxEnsureCapacity((emxArray__common *)y, j, (int32_T)sizeof(real_T));
dbuffer[1] = zi;
for (j = 0; j + 1 <= x->size[0]; j++) {
dbuffer[0] = dbuffer[1];
dbuffer[1] = 0.0;
for (k = 0; k < 2; k++) {
b_dbuffer = dbuffer[k] + x->data[j] * 0.33333333333333331;
dbuffer[k] = b_dbuffer;
}
y->data[j] = dbuffer[0];
}
}
static void filtfilt(const emxArray_real_T *x_in, emxArray_real_T *y_out)
{
emxArray_real_T *x;
int32_T i2;
int32_T loop_ub;
emxArray_real_T *y;
real_T xtmp;
real_T b_y;
int32_T md2;
emxArray_real_T *c_y;
int32_T m;
emxArray_real_T *d_y;
emxArray_int32_T *r6;
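  /* Zero-phase smoothing: the signal is extended by 3 reflected samples at */
  /* each end, run through filter() forward, reversed, filtered again and   */
  /* reversed back, and the padding is then trimmed off.                    */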
b_emxInit_real_T(&x, 1);
if (x_in->size[0] == 1) {
i2 = x->size[0];
x->size[0] = 1;
emxEnsureCapacity((emxArray__common *)x, i2, (int32_T)sizeof(real_T));
x->data[0] = x_in->data[0];
} else {
i2 = x->size[0];
x->size[0] = x_in->size[0];
emxEnsureCapacity((emxArray__common *)x, i2, (int32_T)sizeof(real_T));
loop_ub = x_in->size[0];
for (i2 = 0; i2 < loop_ub; i2++) {
x->data[i2] = x_in->data[i2];
}
}
if (x->size[0] == 0) {
i2 = y_out->size[0] * y_out->size[1];
y_out->size[0] = 0;
y_out->size[1] = 0;
emxEnsureCapacity((emxArray__common *)y_out, i2, (int32_T)sizeof(real_T));
} else {
b_emxInit_real_T(&y, 1);
xtmp = 2.0 * x->data[0];
b_y = 2.0 * x->data[x->size[0] - 1];
md2 = x->size[0] - 1;
i2 = y->size[0];
y->size[0] = 6 + x->size[0];
emxEnsureCapacity((emxArray__common *)y, i2, (int32_T)sizeof(real_T));
for (i2 = 0; i2 < 3; i2++) {
y->data[i2] = xtmp - x->data[3 - i2];
}
loop_ub = x->size[0];
for (i2 = 0; i2 < loop_ub; i2++) {
y->data[i2 + 3] = x->data[i2];
}
for (i2 = 0; i2 < 3; i2++) {
y->data[(i2 + x->size[0]) + 3] = b_y - x->data[(md2 - i2) - 1];
}
b_emxInit_real_T(&c_y, 1);
i2 = c_y->size[0];
c_y->size[0] = y->size[0];
emxEnsureCapacity((emxArray__common *)c_y, i2, (int32_T)sizeof(real_T));
loop_ub = y->size[0];
for (i2 = 0; i2 < loop_ub; i2++) {
c_y->data[i2] = y->data[i2];
}
xtmp = y->data[0];
filter(c_y, 0.33333333333333331 * xtmp, y);
m = y->size[0];
i2 = y->size[0];
md2 = i2 / 2;
loop_ub = 1;
emxFree_real_T(&c_y);
while (loop_ub <= md2) {
xtmp = y->data[loop_ub - 1];
y->data[loop_ub - 1] = y->data[m - loop_ub];
y->data[m - loop_ub] = xtmp;
loop_ub++;
}
b_emxInit_real_T(&d_y, 1);
i2 = d_y->size[0];
d_y->size[0] = y->size[0];
emxEnsureCapacity((emxArray__common *)d_y, i2, (int32_T)sizeof(real_T));
loop_ub = y->size[0];
for (i2 = 0; i2 < loop_ub; i2++) {
d_y->data[i2] = y->data[i2];
}
xtmp = y->data[0];
filter(d_y, 0.33333333333333331 * xtmp, y);
m = y->size[0];
i2 = y->size[0];
md2 = i2 / 2;
loop_ub = 1;
emxFree_real_T(&d_y);
while (loop_ub <= md2) {
xtmp = y->data[loop_ub - 1];
y->data[loop_ub - 1] = y->data[m - loop_ub];
y->data[m - loop_ub] = xtmp;
loop_ub++;
}
if (x_in->size[0] == 1) {
emxInit_int32_T(&r6, 1);
loop_ub = (int32_T)((real_T)x->size[0] + 3.0) - 4;
i2 = r6->size[0];
r6->size[0] = loop_ub + 1;
emxEnsureCapacity((emxArray__common *)r6, i2, (int32_T)sizeof(int32_T));
for (i2 = 0; i2 <= loop_ub; i2++) {
r6->data[i2] = 4 + i2;
}
i2 = y_out->size[0] * y_out->size[1];
y_out->size[0] = 1;
emxEnsureCapacity((emxArray__common *)y_out, i2, (int32_T)sizeof(real_T));
md2 = r6->size[0];
i2 = y_out->size[0] * y_out->size[1];
y_out->size[1] = md2;
emxEnsureCapacity((emxArray__common *)y_out, i2, (int32_T)sizeof(real_T));
loop_ub = r6->size[0];
for (i2 = 0; i2 < loop_ub; i2++) {
y_out->data[i2] = y->data[r6->data[i2] - 1];
}
emxFree_int32_T(&r6);
} else {
emxInit_int32_T(&r6, 1);
loop_ub = (int32_T)((real_T)x->size[0] + 3.0) - 3;
i2 = y_out->size[0] * y_out->size[1];
y_out->size[0] = loop_ub;
y_out->size[1] = 1;
emxEnsureCapacity((emxArray__common *)y_out, i2, (int32_T)sizeof(real_T));
md2 = (int32_T)((real_T)x->size[0] + 3.0) - 4;
i2 = r6->size[0];
r6->size[0] = md2 + 1;
emxEnsureCapacity((emxArray__common *)r6, i2, (int32_T)sizeof(int32_T));
for (i2 = 0; i2 <= md2; i2++) {
r6->data[i2] = 4 + i2;
}
for (i2 = 0; i2 < loop_ub; i2++) {
y_out->data[i2] = y->data[r6->data[i2] - 1];
}
emxFree_int32_T(&r6);
}
emxFree_real_T(&y);
}
emxFree_real_T(&x);
}
static void histogramFeatures(const real_T ioiHist[17], real_T features[12])
{
int32_T iidx[17];
real_T k[17];
int32_T ind[17];
boolean_T S[17];
int32_T cindx;
real_T xlast;
int32_T b_k;
real_T b_ioiHist[17];
real_T sigma;
real_T b_S[17];
real_T b[17];
int32_T ix;
boolean_T x;
int8_T S_size[2];
int8_T outsz[2];
int32_T loop_ub;
int32_T iindx_data[1];
int32_T b_ix;
int32_T indx_data[1];
  /* HISTOGRAMFEATURES Computes 12 statistical features from the IOI histogram */
  /* (spread, peak ratios, zero fraction, and several spectral-shape measures) */
/* features(1) = mean(ioiHist); */
features[0] = b_std(ioiHist);
eml_sort(ioiHist, k, iidx);
features[1] = k[16] / k[15];
for (cindx = 0; cindx < 17; cindx++) {
ind[cindx] = iidx[cindx];
S[cindx] = (ioiHist[cindx] == 0.0);
}
features[2] = (real_T)ind[16] / (real_T)ind[15];
xlast = S[0];
for (b_k = 0; b_k < 16; b_k++) {
xlast += (real_T)S[b_k + 1];
}
features[3] = xlast / 17.0;
for (cindx = 0; cindx < 17; cindx++) {
b_ioiHist[cindx] = ioiHist[cindx];
/* FEATURESPECTRALDECREASE Computes the Spectral Decrease */
/* A measure of steepness of spectral envelope over frequency */
k[cindx] = cindx;
}
features[4] = featureSpectralCentroid(b_ioiHist);
features[5] = featureSpectralCrest(ioiHist);
k[0] = 1.0;
xlast = 0.0;
for (cindx = 0; cindx < 17; cindx++) {
/* compute slope */
xlast += 1.0 / k[cindx] * (ioiHist[cindx] - ioiHist[0]);
}
sigma = ioiHist[1];
for (b_k = 0; b_k < 15; b_k++) {
sigma += ioiHist[b_k + 2];
}
features[6] = xlast / sigma;
/* FEATURESPECTRALKURTOSIS Computes the Spectral Kurtosis */
/* It is a measure of gaussianity of a spectrum */
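  /* kurtosis = sum((S - mean(S)).^4) / (17 * std(S)^4), using the */
  /* population standard deviation from c_std() above              */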
sigma = c_std(ioiHist);
/* Subtracting means */
xlast = ioiHist[0];
for (b_k = 0; b_k < 16; b_k++) {
xlast += ioiHist[b_k + 1];
}
bsxfun(ioiHist, xlast / 17.0, b_S);
for (b_k = 0; b_k < 17; b_k++) {
k[b_k] = rt_powd_snf(b_S[b_k], 4.0) / (rt_powd_snf(sigma, 4.0) * 17.0);
}
sigma = k[0];
for (b_k = 0; b_k < 16; b_k++) {
sigma += k[b_k + 1];
}
features[7] = sigma;
/* FEATURESPECTRALROLLOFF Computes Spectral Rolloff */
/* Finds frequency bin where cumsum reaches 0.85 of magnitude */
/* compute rolloff */
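  /* The rolloff feature is the 1-based index of the first bin at which */
  /* the cumulative sum of S reaches 85% of its total.                  */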
xlast = ioiHist[0];
for (b_k = 0; b_k < 16; b_k++) {
xlast += ioiHist[b_k + 1];
}
sigma = 0.85 * xlast;
/* Find indices where cumulative sum is greater */
memcpy(&b[0], &ioiHist[0], 17U * sizeof(real_T));
ix = 0;
xlast = ioiHist[0];
for (b_k = 0; b_k < 16; b_k++) {
ix++;
xlast += b[ix];
b[ix] = xlast;
}
for (b_k = 0; b_k < 17; b_k++) {
S[b_k] = (b[b_k] >= sigma);
}
/* Find the maximum value */
xlast = S[0];
for (b_k = 0; b_k < 16; b_k++) {
xlast += (real_T)S[b_k + 1];
}
x = (xlast > 0.0);
b_k = 0;
if (x) {
b_k = 1;
}
S_size[0] = 17;
S_size[1] = (int8_T)b_k;
for (cindx = 0; cindx < 2; cindx++) {
outsz[cindx] = S_size[cindx];
}
loop_ub = outsz[1];
for (cindx = 0; cindx < loop_ub; cindx++) {
iindx_data[cindx] = 1;
}
ix = -16;
cindx = 1;
while (cindx <= b_k) {
ix += 17;
x = S[(ix - 1) % 17];
loop_ub = 1;
cindx = 1;
if (ix < ix + 16) {
for (b_ix = ix; b_ix + 1 <= ix + 16; b_ix++) {
cindx++;
if (S[b_ix % 17] > x) {
x = S[b_ix % 17];
loop_ub = cindx;
}
}
}
iindx_data[0] = loop_ub;
cindx = 2;
}
loop_ub = outsz[1];
for (cindx = 0; cindx < loop_ub; cindx++) {
indx_data[cindx] = iindx_data[cindx];
}
features[8] = indx_data[0];
  /* FEATURESPECTRALSKEWNESS Computes spectral skewness */
  /* A measure of the symmetry of the distribution */
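  /* skewness = sum((S - mean(S)).^3) / (17 * std(S)^3) */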
sigma = c_std(ioiHist);
/* Subtracting means */
xlast = ioiHist[0];
for (b_k = 0; b_k < 16; b_k++) {
xlast += ioiHist[b_k + 1];
}
bsxfun(ioiHist, xlast / 17.0, b_S);
for (b_k = 0; b_k < 17; b_k++) {
k[b_k] = rt_powd_snf(b_S[b_k], 3.0) / (rt_powd_snf(sigma, 3.0) * 17.0);
}
sigma = k[0];
for (b_k = 0; b_k < 16; b_k++) {
sigma += k[b_k + 1];
}
features[9] = sigma;
/* FUNCTIONSPECTRALSLOPE Computes the spectral slope */
/* */
/* compute index vector */
/* compute slope */
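  /* Least-squares slope of the mean-removed histogram against the (roughly */
  /* zero-centred) bin index k - 8.5, k = 0..16:                            */
  /* slope = sum((k - 8.5) .* (S - mean(S))) / sum((k - 8.5).^2)            */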
xlast = ioiHist[0];
for (b_k = 0; b_k < 16; b_k++) {
xlast += ioiHist[b_k + 1];
}
bsxfun(ioiHist, xlast / 17.0, b_S);
xlast = 0.0;
sigma = 0.0;
for (b_k = 0; b_k < 17; b_k++) {
xlast += (-8.5 + (((real_T)b_k + 1.0) - 1.0)) * b_S[b_k];
sigma += (-8.5 + (((real_T)b_k + 1.0) - 1.0)) * (-8.5 + (((real_T)b_k + 1.0)
- 1.0));
b_ioiHist[b_k] = ioiHist[b_k];
k[b_k] = b_k;
b_S[b_k] = ioiHist[b_k] * ioiHist[b_k];
}
features[10] = xlast / sigma;
/* FEATURESPECTRALSPREAD Computes spectral spread */
/* Concentration of energy around spectral centroid */
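  /* spread = sqrt( sum((k - centroid).^2 .* S.^2) / sum(S.^2) ), i.e. the */
  /* standard deviation of the squared histogram around its centroid       */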
bsxfun(k, featureSpectralCentroid(b_ioiHist), b);
for (b_k = 0; b_k < 17; b_k++) {
k[b_k] = b[b_k] * b[b_k] * b_S[b_k];
}
xlast = k[0];
sigma = b_S[0];
for (b_k = 0; b_k < 16; b_k++) {
xlast += k[b_k + 1];
sigma += b_S[b_k + 1];
}
features[11] = sqrt(xlast / sigma);
}
static void ioiHistogram(emxArray_boolean_T *onsets, const emxArray_real_T *T,
real_T ioiHist[17])
{
emxArray_real_T *tOnset;
emxArray_int32_T *r10;
int32_T high_i;
int32_T ixLead;
emxArray_real_T *ioi;
emxArray_real_T *b_y1;
int32_T iyLead;
real_T work_data_idx_0;
real_T tmp1;
real_T tmp2;
int32_T sz[2];
real_T meanIOI_data[1];
int32_T meanIOI_size[2];
int32_T k;
emxArray_real_T *r11;
emxArray_real_T b_meanIOI_data;
int32_T d;
real_T stdIOI_data[1];
boolean_T goodInd_data[1];
real_T histEdges[17];
int32_T exitg1;
b_emxInit_real_T(&tOnset, 1);
emxInit_int32_T(&r10, 1);
  /* IOIHISTOGRAM Computes a normalized inter-onset-interval (IOI) histogram */
  /* from the detected onsets and their corresponding time stamps T */
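  /* IOIs are the first differences of the onset times; they are binned with */
  /* histc-style edges spaced 0.125 apart (in the units of T), the final     */
  /* edge is set to Inf so that long intervals are not dropped, and the      */
  /* counts are normalized to sum to 1.                                      */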
/* Setting the first one as true */
onsets->data[0] = TRUE;
/* onsetInd = onsets; */
eml_li_find(onsets, r10);
high_i = tOnset->size[0];
tOnset->size[0] = r10->size[0];
emxEnsureCapacity((emxArray__common *)tOnset, high_i, (int32_T)sizeof(real_T));
ixLead = r10->size[0];
for (high_i = 0; high_i < ixLead; high_i++) {
tOnset->data[high_i] = T->data[r10->data[high_i] - 1];
}
emxFree_int32_T(&r10);
emxInit_real_T(&ioi, 2);
if (tOnset->size[0] == 0) {
high_i = ioi->size[0] * ioi->size[1];
ioi->size[0] = 0;
ioi->size[1] = 1;
emxEnsureCapacity((emxArray__common *)ioi, high_i, (int32_T)sizeof(real_T));
} else {
ixLead = tOnset->size[0] - 1;
if (ixLead <= 1) {
} else {
ixLead = 1;
}
if (ixLead < 1) {
high_i = ioi->size[0] * ioi->size[1];
ioi->size[0] = 0;
ioi->size[1] = 0;
emxEnsureCapacity((emxArray__common *)ioi, high_i, (int32_T)sizeof(real_T));
} else {
b_emxInit_real_T(&b_y1, 1);
high_i = b_y1->size[0];
b_y1->size[0] = tOnset->size[0] - 1;
emxEnsureCapacity((emxArray__common *)b_y1, high_i, (int32_T)sizeof(real_T));
ixLead = 1;
iyLead = 0;
work_data_idx_0 = tOnset->data[0];
for (high_i = 2; high_i <= tOnset->size[0]; high_i++) {
tmp1 = tOnset->data[ixLead];
tmp2 = work_data_idx_0;
work_data_idx_0 = tmp1;
tmp1 -= tmp2;
ixLead++;
b_y1->data[iyLead] = tmp1;
iyLead++;
}
ixLead = b_y1->size[0];
high_i = ioi->size[0] * ioi->size[1];
ioi->size[0] = ixLead;
emxEnsureCapacity((emxArray__common *)ioi, high_i, (int32_T)sizeof(real_T));
high_i = ioi->size[0] * ioi->size[1];
ioi->size[1] = 1;
emxEnsureCapacity((emxArray__common *)ioi, high_i, (int32_T)sizeof(real_T));
ixLead = b_y1->size[0];
for (high_i = 0; high_i < ixLead; high_i++) {
ioi->data[high_i] = b_y1->data[high_i];
}
emxFree_real_T(&b_y1);
}
}
emxFree_real_T(&tOnset);
for (high_i = 0; high_i < 2; high_i++) {
sz[high_i] = ioi->size[high_i];
}
meanIOI_size[0] = 1;
meanIOI_size[1] = sz[1];
if ((ioi->size[0] == 0) || (ioi->size[1] == 0)) {
meanIOI_size[0] = 1;
meanIOI_size[1] = sz[1];
ixLead = sz[1];
for (high_i = 0; high_i < ixLead; high_i++) {
meanIOI_data[high_i] = 0.0;
}
} else {
iyLead = -1;
work_data_idx_0 = ioi->data[0];
for (k = 2; k <= ioi->size[0]; k++) {
iyLead++;
work_data_idx_0 += ioi->data[iyLead + 1];
}
meanIOI_data[0] = work_data_idx_0;
}
emxInit_real_T(&r11, 2);
b_meanIOI_data.data = (real_T *)&meanIOI_data;
b_meanIOI_data.size = (int32_T *)&meanIOI_size;
b_meanIOI_data.allocatedSize = 1;
b_meanIOI_data.numDimensions = 2;
b_meanIOI_data.canFreeData = FALSE;
rdivide(&b_meanIOI_data, ioi->size[0], r11);
meanIOI_size[0] = 1;
meanIOI_size[1] = r11->size[1];
ixLead = r11->size[0] * r11->size[1];
for (high_i = 0; high_i < ixLead; high_i++) {
meanIOI_data[high_i] = r11->data[high_i];
}
emxFree_real_T(&r11);
if (ioi->size[0] > 1) {
d = ioi->size[0] - 1;
} else {
d = ioi->size[0];
}
for (high_i = 0; high_i < 2; high_i++) {
sz[high_i] = ioi->size[high_i];
}
iyLead = 0;
ixLead = 1;
while (ixLead <= ioi->size[1]) {
if ((ioi->size[0] == 0) || (ioi->size[1] == 0)) {
work_data_idx_0 = rtNaN;
} else {
ixLead = iyLead;
tmp1 = ioi->data[iyLead];
for (k = 0; k <= ioi->size[0] - 2; k++) {
ixLead++;
tmp1 += ioi->data[ixLead];
}
tmp1 /= (real_T)ioi->size[0];
ixLead = iyLead;
tmp2 = ioi->data[iyLead] - tmp1;
work_data_idx_0 = tmp2 * tmp2;
for (k = 0; k <= ioi->size[0] - 2; k++) {
ixLead++;
tmp2 = ioi->data[ixLead] - tmp1;
work_data_idx_0 += tmp2 * tmp2;
}
work_data_idx_0 /= (real_T)d;
}
stdIOI_data[0] = work_data_idx_0;
iyLead += ioi->size[0];
ixLead = 2;
}
k = 0;
while (k <= sz[1] - 1) {
stdIOI_data[0] = sqrt(stdIOI_data[0]);
k = 1;
}
iyLead = ioi->size[1];
ixLead = ioi->size[0] * ioi->size[1];
for (high_i = 0; high_i < ixLead; high_i++) {
goodInd_data[high_i] = ((ioi->data[high_i] > meanIOI_data[high_i] - 2.0 *
stdIOI_data[high_i]) && (ioi->data[high_i] < meanIOI_data[high_i] + 2.0 *
stdIOI_data[high_i]));
}
k = 0;
ixLead = 1;
while (ixLead <= iyLead) {
if (goodInd_data[0]) {
k++;
}
ixLead = 2;
}
/* Avoiding code export bug */
/* ioi(ioi>upperThresh) = []; */
/* ioi(ioi<lowerThresh) = []; */
/* nBins = 16; */
for (high_i = 0; high_i < 17; high_i++) {
histEdges[high_i] = 0.125 * (real_T)high_i;
ioiHist[high_i] = 0.0;
}
histEdges[16] = rtInf;
high_i = 0;
do {
exitg1 = 0;
if (high_i < 16) {
if (histEdges[1 + high_i] < histEdges[high_i]) {
for (high_i = 0; high_i < 17; high_i++) {
ioiHist[high_i] = rtNaN;
}
exitg1 = 1;
} else {
high_i++;
}
} else {
ixLead = 0;
while (ixLead <= k - 1) {
ixLead = 0;
if (!rtIsNaN(ioi->data[0])) {
if ((ioi->data[0] >= 0.0) && (ioi->data[0] < rtInf)) {
ixLead = 1;
iyLead = 2;
high_i = 17;
while (high_i > iyLead) {
d = (ixLead + high_i) >> 1;
if (ioi->data[0] >= histEdges[d - 1]) {
ixLead = d;
iyLead = d + 1;
} else {
high_i = d;
}
}
}
if (ioi->data[0] == rtInf) {
ixLead = 17;
}
}
if (ixLead > 0) {
ioiHist[ixLead - 1]++;
}
ixLead = 1;
}
exitg1 = 1;
}
} while (exitg1 == 0);
emxFree_real_T(&ioi);
work_data_idx_0 = ioiHist[0];
for (k = 0; k < 16; k++) {
work_data_idx_0 += ioiHist[k + 1];
}
for (high_i = 0; high_i < 17; high_i++) {
ioiHist[high_i] /= work_data_idx_0;
}
}
static void onsetDetection(const emxArray_real_T *spec, emxArray_boolean_T
*onsets, emxArray_real_T *flux)
{
emxArray_real_T *b_flux;
int32_T ixstart;
real_T mtmp;
int32_T k0;
boolean_T exitg1;
int32_T i0;
emxArray_real_T *flux1;
int32_T varargin_1[2];
int32_T i;
boolean_T x[7];
int32_T k;
emxArray_real_T *r0;
int32_T nxin;
emxArray_real_T *r1;
emxArray_real_T *mask2;
emxArray_boolean_T *b;
emxArray_int32_T *r2;
emxArray_real_T *b_mask2;
emxArray_real_T *c_mask2;
emxArray_real_T *r3;
int32_T vstride;
int32_T npages;
int32_T dim;
int32_T j;
int32_T ia;
b_emxInit_real_T(&b_flux, 1);
  /* ONSETDETECTION Detects onsets in a spectrogram via spectral-flux peak */
  /* picking; also returns the smoothed flux curve */
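  /* Pipeline: compute the spectral flux, normalize it by its maximum, smooth */
  /* it with the zero-phase filter, mark frames that are local maxima over a  */
  /* 7-sample window, AND that with a second mask requiring the flux to       */
  /* exceed the local 7-sample mean by 0.01, apply a one-sample circular      */
  /* shift, and finally suppress runs of adjacent onsets.                     */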
onsetFlux(spec, b_flux);
/* Normalizing */
ixstart = 1;
mtmp = b_flux->data[0];
if (b_flux->size[0] > 1) {
if (rtIsNaN(b_flux->data[0])) {
k0 = 2;
exitg1 = FALSE;
while ((exitg1 == FALSE) && (k0 <= b_flux->size[0])) {
ixstart = k0;
if (!rtIsNaN(b_flux->data[k0 - 1])) {
mtmp = b_flux->data[k0 - 1];
exitg1 = TRUE;
} else {
k0++;
}
}
}
if (ixstart < b_flux->size[0]) {
while (ixstart + 1 <= b_flux->size[0]) {
if (b_flux->data[ixstart] > mtmp) {
mtmp = b_flux->data[ixstart];
}
ixstart++;
}
}
}
i0 = b_flux->size[0];
emxEnsureCapacity((emxArray__common *)b_flux, i0, (int32_T)sizeof(real_T));
ixstart = b_flux->size[0];
for (i0 = 0; i0 < ixstart; i0++) {
b_flux->data[i0] /= mtmp;
}
emxInit_real_T(&flux1, 2);
/* Smoothing */
/* h=fdesign.lowpass('N,F3dB',12,0.15); */
/* d1 = design(h,'elliptic'); */
/* flux = filtfilt(d1.sosMatrix,d1.ScaleValues,flux); */
filtfilt(b_flux, flux);
/* h = 1/4*ones(4,1); */
/* flux = filter(h,1,flux); */
/* Peak picking */
/* w = 2; % Size of window to find local maxima */
padarray(flux, flux1);
emxFree_real_T(&b_flux);
for (i0 = 0; i0 < 2; i0++) {
varargin_1[i0] = flux1->size[i0];
}
i0 = onsets->size[0] * onsets->size[1];
onsets->size[0] = varargin_1[0];
emxEnsureCapacity((emxArray__common *)onsets, i0, (int32_T)sizeof(boolean_T));
i0 = onsets->size[0] * onsets->size[1];
onsets->size[1] = varargin_1[1];
emxEnsureCapacity((emxArray__common *)onsets, i0, (int32_T)sizeof(boolean_T));
ixstart = varargin_1[0] * varargin_1[1];
for (i0 = 0; i0 < ixstart; i0++) {
onsets->data[i0] = FALSE;
}
if ((0 == flux1->size[0]) || (0 == flux1->size[1])) {
ixstart = 0;
} else if (flux1->size[0] > flux1->size[1]) {
ixstart = flux1->size[0];
} else {
ixstart = flux1->size[1];
}
for (i = 3; i - 3 <= ixstart - 7; i++) {
mtmp = flux1->data[i];
for (i0 = 0; i0 < 7; i0++) {
x[i0] = (mtmp >= flux1->data[(i0 + i) - 3]);
}
mtmp = x[0];
for (k = 0; k < 6; k++) {
mtmp += (real_T)x[k + 1];
}
if (mtmp == 7.0) {
onsets->data[i] = TRUE;
}
}
b_emxInit_real_T(&r0, 1);
/* Remove m elements at the start and end */
eml_null_assignment(onsets);
mtmp = (real_T)(onsets->size[0] * onsets->size[1]) - 3.0;
i0 = onsets->size[0] * onsets->size[1];
nxin = r0->size[0];
r0->size[0] = (int32_T)((real_T)i0 - mtmp) + 1;
emxEnsureCapacity((emxArray__common *)r0, nxin, (int32_T)sizeof(real_T));
ixstart = (int32_T)((real_T)i0 - mtmp);
for (i0 = 0; i0 <= ixstart; i0++) {
r0->data[i0] = mtmp + (real_T)i0;
}
emxInit_real_T(&r1, 2);
i0 = r1->size[0] * r1->size[1];
r1->size[0] = 1;
emxEnsureCapacity((emxArray__common *)r1, i0, (int32_T)sizeof(real_T));
ixstart = r0->size[0];
i0 = r1->size[0] * r1->size[1];
r1->size[1] = ixstart;
emxEnsureCapacity((emxArray__common *)r1, i0, (int32_T)sizeof(real_T));
ixstart = r0->size[0];
for (i0 = 0; i0 < ixstart; i0++) {
r1->data[i0] = r0->data[i0];
}
emxFree_real_T(&r0);
b_eml_null_assignment(onsets, r1);
/* Perform second thresholding operation */
/* m = 1; % Multiplier so mean is calculated over a larger range before peak */
/* delta = 0.01; % Threshold above local mean */
padarray(flux, flux1);
for (i0 = 0; i0 < 2; i0++) {
varargin_1[i0] = flux1->size[i0];
}
emxInit_real_T(&mask2, 2);
i0 = mask2->size[0] * mask2->size[1];
mask2->size[0] = varargin_1[0];
emxEnsureCapacity((emxArray__common *)mask2, i0, (int32_T)sizeof(real_T));
i0 = mask2->size[0] * mask2->size[1];
mask2->size[1] = varargin_1[1];
emxEnsureCapacity((emxArray__common *)mask2, i0, (int32_T)sizeof(real_T));
ixstart = varargin_1[0] * varargin_1[1];
for (i0 = 0; i0 < ixstart; i0++) {
mask2->data[i0] = 0.0;
}
if ((0 == flux1->size[0]) || (0 == flux1->size[1])) {
ixstart = 0;
} else if (flux1->size[0] > flux1->size[1]) {
ixstart = flux1->size[0];
} else {
ixstart = flux1->size[1];
}
for (i = 3; i - 3 <= ixstart - 7; i++) {
/* flux = onsetDetection(denoisedSpec); */
mtmp = flux1->data[-3 + i];
for (k = 0; k < 6; k++) {
mtmp += flux1->data[(k + i) - 2];
}
if (flux1->data[i] >= mtmp / 7.0 + 0.01) {
mask2->data[i] = 1.0;
}
}
emxFree_real_T(&flux1);
emxInit_boolean_T(&b, 2);
/* Remove mw elements at the start and end */
nxin = mask2->size[0] * mask2->size[1];
i0 = b->size[0] * b->size[1];
b->size[0] = 1;
b->size[1] = nxin;
emxEnsureCapacity((emxArray__common *)b, i0, (int32_T)sizeof(boolean_T));
for (i0 = 0; i0 < nxin; i0++) {
b->data[i0] = FALSE;
}
for (k = 0; k < 2; k++) {
b->data[k] = TRUE;
}
ixstart = 0;
for (k = 1; k <= b->size[1]; k++) {
ixstart += b->data[k - 1];
}
ixstart = nxin - ixstart;
k0 = -1;
for (k = 1; k <= nxin; k++) {
if ((k > b->size[1]) || (!b->data[k - 1])) {
k0++;
mask2->data[k0] = mask2->data[k - 1];
}
}
emxInit_int32_T(&r2, 1);
emxInit_real_T(&b_mask2, 2);
b_emxInit_real_T(&c_mask2, 1);
if ((mask2->size[0] != 1) && (mask2->size[1] == 1)) {
if (1 > ixstart) {
ixstart = 0;
}
i0 = c_mask2->size[0];
c_mask2->size[0] = ixstart;
emxEnsureCapacity((emxArray__common *)c_mask2, i0, (int32_T)sizeof(real_T));
for (i0 = 0; i0 < ixstart; i0++) {
c_mask2->data[i0] = mask2->data[i0];
}
i0 = mask2->size[0] * mask2->size[1];
mask2->size[0] = ixstart;
mask2->size[1] = 1;
emxEnsureCapacity((emxArray__common *)mask2, i0, (int32_T)sizeof(real_T));
i0 = 0;
while (i0 <= 0) {
for (i0 = 0; i0 < ixstart; i0++) {
mask2->data[i0] = c_mask2->data[i0];
}
i0 = 1;
}
} else {
if (1 > ixstart) {
ixstart = 0;
}
i0 = r2->size[0];
r2->size[0] = ixstart;
emxEnsureCapacity((emxArray__common *)r2, i0, (int32_T)sizeof(int32_T));
for (i0 = 0; i0 < ixstart; i0++) {
r2->data[i0] = 1 + i0;
}
ixstart = r2->size[0];
i0 = b_mask2->size[0] * b_mask2->size[1];
b_mask2->size[0] = 1;
b_mask2->size[1] = ixstart;
emxEnsureCapacity((emxArray__common *)b_mask2, i0, (int32_T)sizeof(real_T));
for (i0 = 0; i0 < ixstart; i0++) {
nxin = 0;
while (nxin <= 0) {
b_mask2->data[b_mask2->size[0] * i0] = mask2->data[r2->data[i0] - 1];
nxin = 1;
}
}
i0 = mask2->size[0] * mask2->size[1];
mask2->size[0] = b_mask2->size[0];
mask2->size[1] = b_mask2->size[1];
emxEnsureCapacity((emxArray__common *)mask2, i0, (int32_T)sizeof(real_T));
ixstart = b_mask2->size[1];
for (i0 = 0; i0 < ixstart; i0++) {
k0 = b_mask2->size[0];
for (nxin = 0; nxin < k0; nxin++) {
mask2->data[nxin + mask2->size[0] * i0] = b_mask2->data[nxin +
b_mask2->size[0] * i0];
}
}
}
emxFree_real_T(&c_mask2);
emxFree_real_T(&b_mask2);
emxFree_int32_T(&r2);
b_emxInit_real_T(&r3, 1);
mtmp = (real_T)(mask2->size[0] * mask2->size[1]) - 3.0;
i0 = mask2->size[0] * mask2->size[1];
nxin = r3->size[0];
r3->size[0] = (int32_T)((real_T)i0 - mtmp) + 1;
emxEnsureCapacity((emxArray__common *)r3, nxin, (int32_T)sizeof(real_T));
ixstart = (int32_T)((real_T)i0 - mtmp);
for (i0 = 0; i0 <= ixstart; i0++) {
r3->data[i0] = mtmp + (real_T)i0;
}
i0 = r1->size[0] * r1->size[1];
r1->size[0] = 1;
emxEnsureCapacity((emxArray__common *)r1, i0, (int32_T)sizeof(real_T));
ixstart = r3->size[0];
i0 = r1->size[0] * r1->size[1];
r1->size[1] = ixstart;
emxEnsureCapacity((emxArray__common *)r1, i0, (int32_T)sizeof(real_T));
ixstart = r3->size[0];
for (i0 = 0; i0 < ixstart; i0++) {
r1->data[i0] = r3->data[i0];
}
emxFree_real_T(&r3);
c_eml_null_assignment(mask2, r1);
i0 = onsets->size[0] * onsets->size[1];
emxEnsureCapacity((emxArray__common *)onsets, i0, (int32_T)sizeof(boolean_T));
ixstart = onsets->size[0];
k0 = onsets->size[1];
ixstart *= k0;
emxFree_real_T(&r1);
for (i0 = 0; i0 < ixstart; i0++) {
onsets->data[i0] = (onsets->data[i0] && (mask2->data[i0] != 0.0));
}
emxFree_real_T(&mask2);
onsets->data[0] = FALSE;
if ((onsets->size[0] == 0) || (onsets->size[1] == 0) || ((onsets->size[0] == 1)
&& (onsets->size[1] == 1))) {
} else {
for (i0 = 0; i0 < 2; i0++) {
varargin_1[i0] = onsets->size[i0];
}
ixstart = varargin_1[0];
if (varargin_1[1] > varargin_1[0]) {
ixstart = varargin_1[1];
}
i0 = b->size[0] * b->size[1];
b->size[0] = 1;
b->size[1] = ixstart;
emxEnsureCapacity((emxArray__common *)b, i0, (int32_T)sizeof(boolean_T));
vstride = 1;
npages = onsets->size[0] * onsets->size[1];
for (dim = 0; dim < 2; dim++) {
i0 = onsets->size[dim];
npages = div_s32(npages, onsets->size[dim]);
if (onsets->size[dim] > 1) {
ixstart = (int32_T)fabs(-1.0 + (((real_T)dim + 1.0) - 1.0));
if (ixstart - div_s32(ixstart, onsets->size[dim]) * onsets->size[dim] >
0) {
ixstart = (onsets->size[dim] - 1) * vstride;
k0 = 0;
for (i = 1; i <= npages; i++) {
nxin = k0;
k0 += ixstart;
for (j = 1; j <= vstride; j++) {
nxin++;
k0++;
ia = nxin;
for (k = 1; k <= i0; k++) {
b->data[k - 1] = onsets->data[ia - 1];
ia += vstride;
}
ia = nxin - 1;
for (k = 2; k <= i0; k++) {
onsets->data[ia] = b->data[k - 1];
ia += vstride;
}
onsets->data[ia] = b->data[0];
}
}
}
}
vstride *= i0;
}
}
emxFree_boolean_T(&b);
/* Some post processing to remove sequences of onsets */
/* Changing to non vectorized versions for export */
if ((0 == onsets->size[0]) || (0 == onsets->size[1])) {
ixstart = 0;
} else if (onsets->size[0] > onsets->size[1]) {
ixstart = onsets->size[0];
} else {
ixstart = onsets->size[1];
}
for (i = 0; i <= ixstart - 3; i++) {
if ((onsets->data[i] == 1) && (onsets->data[i + 1] == 1)) {
if (onsets->data[i + 2] == 1) {
onsets->data[i + 2] = FALSE;
}
onsets->data[i + 1] = FALSE;
}
}
/* tripleInd = strfind(onsets',[1,1,1]); */
/* onsets(tripleInd+1) = 0; */
/* onsets(tripleInd+2) = 0; */
/* */
/* doubleInd = strfind(onsets',[1,1]); */
/* onsets(doubleInd+1) = 0; */
/* onsets(1) = 0; */
/* flux(1) = 0; */
/* onsets(end+1) = 0; */
/* flux(end+1) = 0; */
/* xmin = 1; */
/* xmax = length(flux); */
/* */
/* figure */
/* subplot(4,1,1) */
/* stem(mask1); */
/* axis([xmin xmax 0 1]); */
/* subplot(4,1,2) */
/* stem(mask2); */
/* axis([xmin xmax 0 1]); */
/* subplot(4,1,3) */
/* stem(mask1&mask2); */
/* axis([xmin xmax 0 1]); */
/* subplot(4,1,4); */
/* imagesc(denoisedSpec); */
/* axis([xmin xmax 0 512]); */
/* axis('xy'); */
/* colormap(hot); */
}
static void onsetFlux(const emxArray_real_T *S, emxArray_real_T *flux)
{
emxArray_real_T *b_S;
int32_T iyLead;
int32_T loop_ub;
int32_T ixLead;
int32_T iy;
emxArray_real_T *x;
int32_T d;
emxArray_real_T *b_y1;
uint32_T ySize[2];
int32_T ix;
real_T work;
real_T tmp2;
emxArray_real_T *r4;
emxArray_real_T *b_flux;
real_T y;
real_T r;
emxArray_real_T *c_flux;
emxArray_boolean_T *b_x;
emxArray_int32_T *r5;
b_emxInit_real_T(&b_S, 1);
/* ONSETFLUX Computes new spectral flux */
  /* flux[t] is the half-wave-rectified frame-to-frame difference of S, summed over bins */
/* Just to be sure */
/* S = abs(S); */
iyLead = S->size[0];
loop_ub = S->size[0];
ixLead = S->size[1];
iy = b_S->size[0];
b_S->size[0] = loop_ub;
emxEnsureCapacity((emxArray__common *)b_S, iy, (int32_T)sizeof(real_T));
for (iy = 0; iy < loop_ub; iy++) {
b_S->data[iy] = S->data[iy + S->size[0] * (ixLead - 1)];
}
emxInit_real_T(&x, 2);
iy = x->size[0] * x->size[1];
x->size[0] = S->size[0];
x->size[1] = S->size[1] + 1;
emxEnsureCapacity((emxArray__common *)x, iy, (int32_T)sizeof(real_T));
loop_ub = S->size[1];
for (iy = 0; iy < loop_ub; iy++) {
d = S->size[0];
for (ixLead = 0; ixLead < d; ixLead++) {
x->data[ixLead + x->size[0] * iy] = S->data[ixLead + S->size[0] * iy];
}
}
iy = 0;
while (iy <= 0) {
for (iy = 0; iy < iyLead; iy++) {
x->data[iy + x->size[0] * S->size[1]] = b_S->data[iy];
}
iy = 1;
}
emxFree_real_T(&b_S);
emxInit_real_T(&b_y1, 2);
if (1 >= x->size[1]) {
for (iy = 0; iy < 2; iy++) {
ySize[iy] = (uint32_T)x->size[iy];
}
iy = b_y1->size[0] * b_y1->size[1];
b_y1->size[0] = (int32_T)ySize[0];
emxEnsureCapacity((emxArray__common *)b_y1, iy, (int32_T)sizeof(real_T));
iy = b_y1->size[0] * b_y1->size[1];
b_y1->size[1] = 0;
emxEnsureCapacity((emxArray__common *)b_y1, iy, (int32_T)sizeof(real_T));
} else {
for (iy = 0; iy < 2; iy++) {
ySize[iy] = (uint32_T)x->size[iy];
}
iy = b_y1->size[0] * b_y1->size[1];
b_y1->size[0] = (int32_T)ySize[0];
b_y1->size[1] = x->size[1] - 1;
emxEnsureCapacity((emxArray__common *)b_y1, iy, (int32_T)sizeof(real_T));
ix = 0;
iy = 1;
for (d = 1; d <= x->size[0]; d++) {
ixLead = ix + x->size[0];
iyLead = iy;
work = x->data[ix];
for (loop_ub = 2; loop_ub <= x->size[1]; loop_ub++) {
tmp2 = work;
work = x->data[ixLead];
tmp2 = x->data[ixLead] - tmp2;
ixLead += x->size[0];
b_y1->data[iyLead - 1] = tmp2;
iyLead += x->size[0];
}
ix++;
iy++;
}
}
emxFree_real_T(&x);
/* Half wave rectification */
for (iy = 0; iy < 2; iy++) {
ySize[iy] = (uint32_T)b_y1->size[iy];
}
emxInit_real_T(&r4, 2);
iy = r4->size[0] * r4->size[1];
r4->size[0] = (int32_T)ySize[0];
r4->size[1] = (int32_T)ySize[1];
emxEnsureCapacity((emxArray__common *)r4, iy, (int32_T)sizeof(real_T));
iy = b_y1->size[0] * b_y1->size[1];
for (loop_ub = 0; loop_ub < iy; loop_ub++) {
r4->data[(int32_T)(1.0 + (real_T)loop_ub) - 1] = fabs(b_y1->data[(int32_T)
(1.0 + (real_T)loop_ub) - 1]);
}
iy = b_y1->size[0] * b_y1->size[1];
emxEnsureCapacity((emxArray__common *)b_y1, iy, (int32_T)sizeof(real_T));
ixLead = b_y1->size[0];
d = b_y1->size[1];
iyLead = ixLead * d;
for (iy = 0; iy < iyLead; iy++) {
b_y1->data[iy] = (b_y1->data[iy] + r4->data[iy]) / 2.0;
}
emxFree_real_T(&r4);
/* Summed across all bins */
for (iy = 0; iy < 2; iy++) {
ySize[iy] = (uint32_T)b_y1->size[iy];
}
emxInit_real_T(&b_flux, 2);
iy = b_flux->size[0] * b_flux->size[1];
b_flux->size[0] = 1;
b_flux->size[1] = (int32_T)ySize[1];
emxEnsureCapacity((emxArray__common *)b_flux, iy, (int32_T)sizeof(real_T));
if ((b_y1->size[0] == 0) || (b_y1->size[1] == 0)) {
iy = b_flux->size[0] * b_flux->size[1];
b_flux->size[0] = 1;
emxEnsureCapacity((emxArray__common *)b_flux, iy, (int32_T)sizeof(real_T));
iy = b_flux->size[0] * b_flux->size[1];
b_flux->size[1] = (int32_T)ySize[1];
emxEnsureCapacity((emxArray__common *)b_flux, iy, (int32_T)sizeof(real_T));
iyLead = (int32_T)ySize[1];
for (iy = 0; iy < iyLead; iy++) {
b_flux->data[iy] = 0.0;
}
} else {
ix = -1;
iy = -1;
for (d = 1; d <= b_y1->size[1]; d++) {
ixLead = ix + 1;
ix++;
tmp2 = b_y1->data[ixLead];
for (loop_ub = 2; loop_ub <= b_y1->size[0]; loop_ub++) {
ix++;
tmp2 += b_y1->data[ix];
}
iy++;
b_flux->data[iy] = tmp2;
}
}
emxFree_real_T(&b_y1);
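  /* Standardize: centre the flux by its mean, divide by its sample variance */
  /* (note: variance, not standard deviation, in this generated code), then  */
  /* clip negative values to zero.                                           */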
if (b_flux->size[1] == 0) {
tmp2 = 0.0;
} else {
tmp2 = b_flux->data[0];
for (loop_ub = 2; loop_ub <= b_flux->size[1]; loop_ub++) {
tmp2 += b_flux->data[loop_ub - 1];
}
}
tmp2 /= (real_T)b_flux->size[1];
if (b_flux->size[1] > 1) {
d = b_flux->size[1] - 1;
} else {
d = b_flux->size[1];
}
if (b_flux->size[1] == 0) {
y = rtNaN;
} else {
ix = 0;
work = b_flux->data[0];
for (loop_ub = 0; loop_ub <= b_flux->size[1] - 2; loop_ub++) {
ix++;
work += b_flux->data[ix];
}
work /= (real_T)b_flux->size[1];
ix = 0;
r = b_flux->data[0] - work;
y = r * r;
for (loop_ub = 0; loop_ub <= b_flux->size[1] - 2; loop_ub++) {
ix++;
r = b_flux->data[ix] - work;
y += r * r;
}
y /= (real_T)d;
}
emxInit_real_T(&c_flux, 2);
iy = c_flux->size[0] * c_flux->size[1];
c_flux->size[0] = 1;
c_flux->size[1] = b_flux->size[1];
emxEnsureCapacity((emxArray__common *)c_flux, iy, (int32_T)sizeof(real_T));
iyLead = b_flux->size[0] * b_flux->size[1];
for (iy = 0; iy < iyLead; iy++) {
c_flux->data[iy] = b_flux->data[iy] - tmp2;
}
rdivide(c_flux, y, b_flux);
iy = flux->size[0];
flux->size[0] = b_flux->size[1];
emxEnsureCapacity((emxArray__common *)flux, iy, (int32_T)sizeof(real_T));
iyLead = b_flux->size[1];
emxFree_real_T(&c_flux);
for (iy = 0; iy < iyLead; iy++) {
flux->data[iy] = b_flux->data[iy];
}
emxFree_real_T(&b_flux);
b_emxInit_boolean_T(&b_x, 1);
iy = b_x->size[0];
b_x->size[0] = flux->size[0];
emxEnsureCapacity((emxArray__common *)b_x, iy, (int32_T)sizeof(boolean_T));
iyLead = flux->size[0];
for (iy = 0; iy < iyLead; iy++) {
b_x->data[iy] = (flux->data[iy] < 0.0);
}
loop_ub = 0;
for (d = 1; d <= b_x->size[0]; d++) {
if (b_x->data[d - 1]) {
loop_ub++;
}
}
emxInit_int32_T(&r5, 1);
iy = r5->size[0];
r5->size[0] = loop_ub;
emxEnsureCapacity((emxArray__common *)r5, iy, (int32_T)sizeof(int32_T));
ixLead = 0;
for (d = 1; d <= b_x->size[0]; d++) {
if (b_x->data[d - 1]) {
r5->data[ixLead] = d;
ixLead++;
}
}
emxFree_boolean_T(&b_x);
iyLead = r5->size[0];
for (iy = 0; iy < iyLead; iy++) {
flux->data[r5->data[iy] - 1] = 0.0;
}
emxFree_int32_T(&r5);
}
static void padarray(const emxArray_real_T *varargin_1, emxArray_real_T *b)
{
real_T sizeB[2];
int32_T i3;
real_T b_sizeB;
int32_T c_sizeB[2];
int32_T outsize[2];
int32_T loop_ub;
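  /* Pads the input with 3 zeros at each end of the first dimension (the   */
  /* behaviour of MATLAB's padarray(x, [3 0])), delegating to ConstantPad() */
  /* unless the input is empty.                                             */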
for (i3 = 0; i3 < 2; i3++) {
sizeB[i3] = 0.0;
}
sizeB[0] = 3.0;
if ((varargin_1->size[0] == 0) || (varargin_1->size[1] == 0)) {
for (i3 = 0; i3 < 2; i3++) {
b_sizeB = (real_T)varargin_1->size[i3] + 2.0 * sizeB[i3];
sizeB[i3] = b_sizeB;
}
c_sizeB[0] = (int32_T)sizeB[0];
c_sizeB[1] = (int32_T)sizeB[1];
for (i3 = 0; i3 < 2; i3++) {
outsize[i3] = c_sizeB[i3];
}
i3 = b->size[0] * b->size[1];
b->size[0] = outsize[0];
emxEnsureCapacity((emxArray__common *)b, i3, (int32_T)sizeof(real_T));
i3 = b->size[0] * b->size[1];
b->size[1] = outsize[1];
emxEnsureCapacity((emxArray__common *)b, i3, (int32_T)sizeof(real_T));
loop_ub = outsize[0] * outsize[1];
for (i3 = 0; i3 < loop_ub; i3++) {
b->data[i3] = 0.0;
}
} else {
ConstantPad(varargin_1, sizeB, b);
}
}
static void rdivide(const emxArray_real_T *x, real_T y, emxArray_real_T *z)
{
int32_T i1;
int32_T loop_ub;
i1 = z->size[0] * z->size[1];
z->size[0] = 1;
z->size[1] = x->size[1];
emxEnsureCapacity((emxArray__common *)z, i1, (int32_T)sizeof(real_T));
loop_ub = x->size[0] * x->size[1];
for (i1 = 0; i1 < loop_ub; i1++) {
z->data[i1] = x->data[i1] / y;
}
}
static real_T rt_powd_snf(real_T u0, real_T u1)
{
real_T y;
real_T d0;
real_T d1;
if (rtIsNaN(u0) || rtIsNaN(u1)) {
y = rtNaN;
} else {
d0 = fabs(u0);
d1 = fabs(u1);
if (rtIsInf(u1)) {
if (d0 == 1.0) {
y = rtNaN;
} else if (d0 > 1.0) {
if (u1 > 0.0) {
y = rtInf;
} else {
y = 0.0;
}
} else if (u1 > 0.0) {
y = 0.0;
} else {
y = rtInf;
}
} else if (d1 == 0.0) {
y = 1.0;
} else if (d1 == 1.0) {
if (u1 > 0.0) {
y = u0;
} else {
y = 1.0 / u0;
}
} else if (u1 == 2.0) {
y = u0 * u0;
} else if ((u1 == 0.5) && (u0 >= 0.0)) {
y = sqrt(u0);
} else if ((u0 < 0.0) && (u1 > floor(u1))) {
y = rtNaN;
} else {
y = pow(u0, u1);
}
}
return y;
}
void computeOnsetFeatures(const emxArray_real_T *denoisedSpectrum, const
emxArray_real_T *T, real_T ioiFeatures[12], emxArray_boolean_T *onsets)
{
emxArray_real_T *unusedU0;
emxArray_boolean_T *b_onsets;
int32_T i;
int32_T loop_ub;
real_T ioiHist[17];
boolean_T bv0[12];
int32_T tmp_size[1];
int32_T tmp_data[12];
emxInit_real_T(&unusedU0, 2);
emxInit_boolean_T(&b_onsets, 2);
/* COMPUTEONSETFEATURES Computes onset features */
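  /* Pipeline: detect onsets on the denoised spectrogram, build the IOI     */
  /* histogram from the onset times in T, derive 12 histogram features, and */
  /* zero out any NaN or Inf entries before returning.                      */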
onsetDetection(denoisedSpectrum, onsets, unusedU0);
/* Collecting number of onsets per 2 second window */
/* onsetFeatures1 = onsetFeatures(onsets,T); */
/* Collect IOI histogram */
i = b_onsets->size[0] * b_onsets->size[1];
b_onsets->size[0] = onsets->size[0];
b_onsets->size[1] = onsets->size[1];
emxEnsureCapacity((emxArray__common *)b_onsets, i, (int32_T)sizeof(boolean_T));
loop_ub = onsets->size[0] * onsets->size[1];
emxFree_real_T(&unusedU0);
for (i = 0; i < loop_ub; i++) {
b_onsets->data[i] = onsets->data[i];
}
ioiHistogram(b_onsets, T, ioiHist);
histogramFeatures(ioiHist, ioiFeatures);
/* ioiFeatures = vertcat(ioiFeatures,onsetFeatures1); */
emxFree_boolean_T(&b_onsets);
for (i = 0; i < 12; i++) {
bv0[i] = rtIsNaN(ioiFeatures[i]);
}
b_eml_li_find(bv0, tmp_data, tmp_size);
loop_ub = tmp_size[0];
for (i = 0; i < loop_ub; i++) {
ioiFeatures[tmp_data[i] - 1] = 0.0;
}
for (i = 0; i < 12; i++) {
bv0[i] = rtIsInf(ioiFeatures[i]);
}
b_eml_li_find(bv0, tmp_data, tmp_size);
loop_ub = tmp_size[0];
for (i = 0; i < loop_ub; i++) {
ioiFeatures[tmp_data[i] - 1] = 0.0;
}
}
void computeOnsetFeatures_initialize(void)
{
rt_InitInfAndNaN(8U);
}
void computeOnsetFeatures_terminate(void)
{
/* (no terminate code required) */
}
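/*
 * emxArray construction helpers. The emxCreate* variants allocate
 * zero-initialized boolean storage with calloc(), while the
 * emxCreateWrapper* variants borrow a caller-owned buffer and clear
 * canFreeData so the emxFree helpers will not release it.
 */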
emxArray_boolean_T *emxCreateND_boolean_T(int32_T numDimensions, int32_T *size)
{
emxArray_boolean_T *emx;
int32_T numEl;
int32_T i;
emxInit_boolean_T(&emx, numDimensions);
numEl = 1;
for (i = 0; i < numDimensions; i++) {
numEl *= size[i];
emx->size[i] = size[i];
}
emx->data = (boolean_T *)calloc((uint32_T)numEl, sizeof(boolean_T));
emx->numDimensions = numDimensions;
emx->allocatedSize = numEl;
return emx;
}
//emxArray_real_T *emxCreateND_real_T(int32_T numDimensions, int32_T *size)
//{
// emxArray_real_T *emx;
// int32_T numEl;
// int32_T i;
// emxInit_real_T(&emx, numDimensions);
// numEl = 1;
// for (i = 0; i < numDimensions; i++) {
// numEl *= size[i];
// emx->size[i] = size[i];
// }
//
// emx->data = (real_T *)calloc((uint32_T)numEl, sizeof(real_T));
// emx->numDimensions = numDimensions;
// emx->allocatedSize = numEl;
// return emx;
//}
emxArray_boolean_T *emxCreateWrapperND_boolean_T(boolean_T *data, int32_T
numDimensions, int32_T *size)
{
emxArray_boolean_T *emx;
int32_T numEl;
int32_T i;
emxInit_boolean_T(&emx, numDimensions);
numEl = 1;
for (i = 0; i < numDimensions; i++) {
numEl *= size[i];
emx->size[i] = size[i];
}
emx->data = data;
emx->numDimensions = numDimensions;
emx->allocatedSize = numEl;
emx->canFreeData = FALSE;
return emx;
}
//emxArray_real_T *emxCreateWrapperND_real_T(real_T *data, int32_T numDimensions,
// int32_T *size)
//{
// emxArray_real_T *emx;
// int32_T numEl;
// int32_T i;
// emxInit_real_T(&emx, numDimensions);
// numEl = 1;
// for (i = 0; i < numDimensions; i++) {
// numEl *= size[i];
// emx->size[i] = size[i];
// }
//
// emx->data = data;
// emx->numDimensions = numDimensions;
// emx->allocatedSize = numEl;
// emx->canFreeData = FALSE;
// return emx;
//}
emxArray_boolean_T *emxCreateWrapper_boolean_T(boolean_T *data, int32_T rows,
int32_T cols)
{
emxArray_boolean_T *emx;
int32_T size[2];
int32_T numEl;
int32_T i;
size[0] = rows;
size[1] = cols;
emxInit_boolean_T(&emx, 2);
numEl = 1;
for (i = 0; i < 2; i++) {
numEl *= size[i];
emx->size[i] = size[i];
}
emx->data = data;
emx->numDimensions = 2;
emx->allocatedSize = numEl;
emx->canFreeData = FALSE;
return emx;
}
//emxArray_real_T *emxCreateWrapper_real_T(real_T *data, int32_T rows, int32_T
// cols)
//{
// emxArray_real_T *emx;
// int32_T size[2];
// int32_T numEl;
// int32_T i;
// size[0] = rows;
// size[1] = cols;
// emxInit_real_T(&emx, 2);
// numEl = 1;
// for (i = 0; i < 2; i++) {
// numEl *= size[i];
// emx->size[i] = size[i];
// }
//
// emx->data = data;
// emx->numDimensions = 2;
// emx->allocatedSize = numEl;
// emx->canFreeData = FALSE;
// return emx;
//}
emxArray_boolean_T *emxCreate_boolean_T(int32_T rows, int32_T cols)
{
emxArray_boolean_T *emx;
int32_T size[2];
int32_T numEl;
int32_T i;
size[0] = rows;
size[1] = cols;
emxInit_boolean_T(&emx, 2);
numEl = 1;
for (i = 0; i < 2; i++) {
numEl *= size[i];
emx->size[i] = size[i];
}
emx->data = (boolean_T *)calloc((uint32_T)numEl, sizeof(boolean_T));
emx->numDimensions = 2;
emx->allocatedSize = numEl;
return emx;
}
//emxArray_real_T *emxCreate_real_T(int32_T rows, int32_T cols)
//{
// emxArray_real_T *emx;
// int32_T size[2];
// int32_T numEl;
// int32_T i;
// size[0] = rows;
// size[1] = cols;
// emxInit_real_T(&emx, 2);
// numEl = 1;
// for (i = 0; i < 2; i++) {
// numEl *= size[i];
// emx->size[i] = size[i];
// }
//
// emx->data = (real_T *)calloc((uint32_T)numEl, sizeof(real_T));
// emx->numDimensions = 2;
// emx->allocatedSize = numEl;
// return emx;
//}
void emxDestroyArray_boolean_T(emxArray_boolean_T *emxArray)
{
emxFree_boolean_T(&emxArray);
}
//void emxDestroyArray_real_T(emxArray_real_T *emxArray)
//{
// emxFree_real_T(&emxArray);
//}
/* End of code generation (computeOnsetFeatures.c) */
| aneeshvartakavi/birdID | birdID/Source/Export/computeOnsetFeatures_export.c | C | gpl-2.0 | 74,844 |
/*
* This file contains work-arounds for many known PCI hardware
* bugs. Devices present only on certain architectures (host
* bridges et cetera) should be handled in arch-specific code.
*
* Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
*
* Copyright (c) 1999 Martin Mares <mj@ucw.cz>
*
* Init/reset quirks for USB host controllers should be in the
 * USB quirks file, where their drivers can access and reuse it.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/mm.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
/*
* Decoding should be disabled for a PCI device during BAR sizing to avoid
 * conflict. But doing so may cause problems on the host bridge and perhaps other
* key system devices. For devices that need to have mmio decoding always-on,
* we need to set the dev->mmio_always_on bit.
*/
static void quirk_mmio_always_on(struct pci_dev *dev)
{
dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
/* The Mellanox Tavor device gives false positive parity errors
* Mark this device with a broken_parity_status, to allow
* PCI scanning code to "skip" this now blacklisted device.
*/
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
dev->broken_parity_status = 1; /* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
/* Deal with broken BIOSes that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
static void quirk_passive_release(struct pci_dev *dev)
{
struct pci_dev *d = NULL;
unsigned char dlc;
/* We have to make sure a particular bit is set in the PIIX3
ISA bridge, so we have to go out and find it. */
while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
pci_read_config_byte(d, 0x82, &dlc);
if (!(dlc & 1<<1)) {
dev_info(&d->dev, "PIIX3: Enabling Passive Release\n");
dlc |= 1<<1;
pci_write_config_byte(d, 0x82, dlc);
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
but VIA don't answer queries. If you happen to have good contacts at VIA
ask them for me please -- Alan
   This appears to be BIOS-dependent, not version-dependent. So presumably
   there is a chipset-level fix. */
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
isa_dma_bridge_buggy = 1;
dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n");
}
}
/*
 * It's not totally clear which chipsets are the problematic ones.
* We know 82C586 and 82C596 variants are affected.
*/
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
/*
* Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
* for some HT machines to use C4 w/o hanging.
*/
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
u32 pmbase;
u16 pm1a;
pci_read_config_dword(dev, 0x40, &pmbase);
pmbase = pmbase & 0xff80;
pm1a = inw(pmbase);
if (pm1a & 0x10) {
dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
outw(0x10, pmbase);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
/*
* Chipsets where PCI->PCI transfers vanish or hang
*/
static void quirk_nopcipci(struct pci_dev *dev)
{
if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_FAIL;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
static void quirk_nopciamd(struct pci_dev *dev)
{
u8 rev;
pci_read_config_byte(dev, 0x08, &rev);
if (rev == 0x13) {
/* Erratum 24 */
dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
pci_pci_problems |= PCIAGP_FAIL;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
/*
* Triton requires workarounds to be used by the drivers
*/
static void quirk_triton(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_TRITON;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
/*
* VIA Apollo KT133 needs PCI latency patch
* Made according to a windows driver based patch by George E. Breese
* see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
* Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
* the info on which Mr Breese based his work.
*
* Updated based on further information from the site and also on
* information provided by VIA
*/
static void quirk_vialatency(struct pci_dev *dev)
{
struct pci_dev *p;
u8 busarb;
/* Ok we have a potential problem chipset here. Now see if we have
a buggy southbridge */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
if (p != NULL) {
/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
/* Check for buggy part revisions */
if (p->revision < 0x40 || p->revision > 0x42)
goto exit;
} else {
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
if (p == NULL) /* No problem parts */
goto exit;
/* Check for buggy part revisions */
if (p->revision < 0x10 || p->revision > 0x12)
goto exit;
}
/*
* Ok we have the problem. Now set the PCI master grant to
* occur every master grant. The apparent bug is that under high
* PCI load (quite common in Linux of course) you can get data
	 * loss when the CPU is held off the bus for 3 bus master requests.
* This happens to include the IDE controllers....
*
* VIA only apply this fix when an SB Live! is present but under
* both Linux and Windows this isn't enough, and we have seen
* corruption without SB Live! but with things like 3 UDMA IDE
* controllers. So we ignore that bit of the VIA recommendation..
*/
pci_read_config_byte(dev, 0x76, &busarb);
	/* Set bits 5:4 of byte 0x76 to 0x01
	   ("Master priority rotation on every PCI master grant") */
busarb &= ~(1<<5);
busarb |= (1<<4);
pci_write_config_byte(dev, 0x76, busarb);
dev_info(&dev->dev, "Applying VIA southbridge workaround\n");
exit:
pci_dev_put(p);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/*
* VIA Apollo VP3 needs ETBF on BT848/878
*/
static void quirk_viaetbf(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VIAETBF;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
static void quirk_vsfx(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VSFX;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
/*
* Ali Magik requires workarounds to be used by the drivers
* that DMA to AGP space. Latency must be set to 0xA and triton
* workaround applied too
* [Info kindly provided by ALi]
*/
static void quirk_alimagik(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
/*
* Natoma has some interesting boundary conditions with Zoran stuff
* at least
*/
static void quirk_natoma(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_NATOMA;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
/*
* This chip can cause PCI parity errors if config register 0xA0 is read
* while DMAs are occurring.
*/
static void quirk_citrine(struct pci_dev *dev)
{
dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
/*
* This chip can cause bus lockups if config addresses above 0x600
* are read or written.
*/
static void quirk_nfp6000(struct pci_dev *dev)
{
dev->cfg_size = 0x600;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
int i;
for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
struct resource *r = &dev->resource[i];
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
r->end = PAGE_SIZE - 1;
r->start = 0;
r->flags |= IORESOURCE_UNSET;
dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
i, r);
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
/*
* S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
* If it's needed, re-allocate the region.
*/
static void quirk_s3_64M(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
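	/* Force a re-allocation unless the BAR is already 64MB-aligned and 64MB-sized */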
if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0x3ffffff;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
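/*
 * Rewrite dev->resource[pos] from the raw BAR at config offset
 * PCI_BASE_ADDRESS_0 + (pos << 2), trimming it to 'size' bytes and marking
 * it IORESOURCE_PCI_FIXED. Used by the CS5536 quirk below.
 */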
static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
const char *name)
{
u32 region;
struct pci_bus_region bus_region;
struct resource *res = dev->resource + pos;
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
if (!region)
return;
res->name = pci_name(dev);
res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
res->flags |=
(IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
region &= ~(size - 1);
/* Convert from PCI bus to resource space */
bus_region.start = region;
bus_region.end = region + size - 1;
pcibios_bus_to_resource(dev->bus, res, &bus_region);
dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
}
/*
* Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
* ver. 1.33 20070103) don't set the correct ISA PCI region header info.
* BAR0 should be 8 bytes; instead, it may be set to something like 8k
* (which conflicts w/ BAR1's memory range).
*
* CS553x's ISA PCI BARs may also be read-only (ref:
* https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
*/
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
static char *name = "CS5536 ISA bridge";
if (pci_resource_len(dev, 0) != 8) {
quirk_io(dev, 0, 8, name); /* SMB */
quirk_io(dev, 1, 256, name); /* GPIO */
quirk_io(dev, 2, 64, name); /* MFGPT */
dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
name);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
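/*
 * Claim a fixed-size I/O window whose base address is kept in a
 * vendor-specific config word at 'port'. The ACPI/SMBus quirks below use
 * this to reserve hidden register blocks as PCI resources.
 */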
static void quirk_io_region(struct pci_dev *dev, int port,
unsigned size, int nr, const char *name)
{
u16 region;
struct pci_bus_region bus_region;
struct resource *res = dev->resource + nr;
	pci_read_config_word(dev, port, &region);
region &= ~(size - 1);
if (!region)
return;
res->name = pci_name(dev);
res->flags = IORESOURCE_IO;
/* Convert from PCI bus to resource space */
bus_region.start = region;
bus_region.end = region + size - 1;
pcibios_bus_to_resource(dev->bus, res, &bus_region);
if (!pci_claim_resource(dev, nr))
dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
}
/*
 * On ATI Northbridge setups the processor takes an MCE if you even
 * read anywhere between 0x3b0 and 0x3bb, or read 0x3d3.
*/
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
	/* We must not look at these I/O locations */
request_region(0x3b0, 0x0C, "RadeonIGP");
request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
/*
* In the AMD NL platform, this device ([1022:7912]) has a class code of
* PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
* claim it.
* But the dwc3 driver is a more specific driver for this device, and we'd
* prefer to use it instead of xhci. To prevent xhci from claiming the
* device, change the class code to 0x0c03fe, which the PCI r3.0 spec
* defines as "USB device (not host controller)". The dwc3 driver can then
* claim it based on its Vendor and Device ID.
*/
static void quirk_amd_nl_class(struct pci_dev *pdev)
{
u32 class = pdev->class;
/* Use "USB Device (not host controller)" class */
pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
quirk_amd_nl_class);
/*
* Let's make the southbridge information explicit instead
* of having to worry about people probing the ACPI areas,
* for example.. (Yes, it happens, and if you read the wrong
* ACPI register it will put the machine to sleep with no
* way of waking it up again. Bummer).
*
* ALI M7101: Two IO regions pointed to by words at
* 0xE0 (64 bytes of ACPI registers)
* 0xE2 (32 bytes of SMB registers)
*/
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
u32 devres;
u32 mask, size, base;
pci_read_config_dword(dev, port, &devres);
if ((devres & enable) != enable)
return;
mask = (devres >> 16) & 15;
base = devres & 0xffff;
size = 16;
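	/*
	 * Derive the decoded window size from the mask bits: keep halving the
	 * 16-byte starting estimate until the bit just below the current size
	 * shows up in the mask.
	 */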
for (;;) {
unsigned bit = size >> 1;
if ((bit & mask) == bit)
break;
size = bit;
}
/*
* For now we only print it out. Eventually we'll want to
* reserve it (at least if it's in the 0x1000+ range), but
* let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base,
base + size - 1);
}
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
u32 devres;
u32 mask, size, base;
pci_read_config_dword(dev, port, &devres);
if ((devres & enable) != enable)
return;
base = devres & 0xffff0000;
mask = (devres & 0x3f) << 16;
size = 128 << 16;
for (;;) {
unsigned bit = size >> 1;
if ((bit & mask) == bit)
break;
size = bit;
}
/*
* For now we only print it out. Eventually we'll want to
* reserve it, but let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base,
base + size - 1);
}
/*
* PIIX4 ACPI: Two IO regions pointed to by longwords at
* 0x40 (64 bytes of ACPI registers)
* 0x90 (16 bytes of SMB registers)
* and a few strange programmable PIIX4 device resources.
*/
static void quirk_piix4_acpi(struct pci_dev *dev)
{
u32 res_a;
quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
/* Device resource A has enables for some of the other ones */
pci_read_config_dword(dev, 0x5c, &res_a);
piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
/* Device resource D is just bitfields for static resources */
/* Device 12 enabled? */
if (res_a & (1 << 29)) {
piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
}
/* Device 13 enabled? */
if (res_a & (1 << 30)) {
piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
}
piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
#define ICH_PMBASE 0x40
#define ICH_ACPI_CNTL 0x44
#define ICH4_ACPI_EN 0x10
#define ICH6_ACPI_EN 0x80
#define ICH4_GPIOBASE 0x58
#define ICH4_GPIO_CNTL 0x5c
#define ICH4_GPIO_EN 0x10
#define ICH6_GPIOBASE 0x48
#define ICH6_GPIO_CNTL 0x4c
#define ICH6_GPIO_EN 0x10
/*
* ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
* 0x40 (128 bytes of ACPI, GPIO & TCO registers)
* 0x58 (64 bytes of GPIO I/O space)
*/
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
u8 enable;
/*
* The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
* with low legacy (and fixed) ports. We don't know the decoding
* priority and can't tell whether the legacy device or the one created
* here is really at that address. This happens on boards with broken
* BIOSes.
*/
pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
if (enable & ICH4_ACPI_EN)
quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
"ICH4 ACPI/GPIO/TCO");
pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
if (enable & ICH4_GPIO_EN)
quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
"ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
u8 enable;
pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
if (enable & ICH6_ACPI_EN)
quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
"ICH6 ACPI/GPIO/TCO");
pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
if (enable & ICH6_GPIO_EN)
quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
"ICH6 GPIO");
}
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
{
u32 val;
u32 size, base;
pci_read_config_dword(dev, reg, &val);
/* Enabled? */
if (!(val & 1))
return;
base = val & 0xfffc;
if (dynsize) {
/*
* This is not correct. It is 16, 32 or 64 bytes depending on
* register D31:F0:ADh bits 5:4.
*
* But this gets us at least _part_ of it.
*/
size = 16;
} else {
size = 128;
}
base &= ~(size-1);
/* Just print it out for now. We should reserve it after more debugging */
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
static void quirk_ich6_lpc(struct pci_dev *dev)
{
/* Shared ACPI/GPIO decode with all ICH6+ */
ich6_lpc_acpi_gpio(dev);
/* ICH6-specific generic IO decode */
ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
{
u32 val;
u32 mask, base;
pci_read_config_dword(dev, reg, &val);
/* Enabled? */
if (!(val & 1))
return;
/*
* IO base in bits 15:2, mask in bits 23:18, both
* are dword-based
*/
base = val & 0xfffc;
mask = (val >> 16) & 0xfc;
mask |= 3;
/* Just print it out for now. We should reserve it after more debugging */
dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
/* ICH7-10 has the same common LPC generic IO decode registers */
static void quirk_ich7_lpc(struct pci_dev *dev)
{
/* We share the common ACPI/GPIO decode with ICH6 */
ich6_lpc_acpi_gpio(dev);
/* And have 4 ICH7+ generic decodes */
ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
/*
* VIA ACPI: One IO region pointed to by longword at
* 0x48 or 0x20 (256 bytes of ACPI registers)
*/
static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
if (dev->revision & 0x10)
quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
"vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
/*
* VIA VT82C686 ACPI: Three IO region pointed to by (long)words at
* 0x48 (256 bytes of ACPI registers)
* 0x70 (128 bytes of hardware monitoring register)
* 0x90 (16 bytes of SMB registers)
*/
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
quirk_vt82c586_acpi(dev);
quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
"vt82c686 HW-mon");
quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
/*
* VIA VT8235 ISA Bridge: Two IO regions pointed to by words at
* 0x88 (128 bytes of power management registers)
* 0xd0 (16 bytes of SMB registers)
*/
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
/*
* TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
* Disable fast back-to-back on the secondary bus segment
*/
static void quirk_xio2000a(struct pci_dev *dev)
{
struct pci_dev *pdev;
u16 command;
dev_warn(&dev->dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
pci_read_config_word(pdev, PCI_COMMAND, &command);
if (command & PCI_COMMAND_FAST_BACK)
pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
quirk_xio2000a);
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
/*
* VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
* devices to the external APIC.
*
* TODO: When we have device-specific interrupt routers,
* this code will go away from quirks.
*/
static void quirk_via_ioapic(struct pci_dev *dev)
{
u8 tmp;
if (nr_ioapics < 1)
tmp = 0; /* nothing routed to external APIC */
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
tmp == 0 ? "Disa" : "Ena");
/* Offset 0x58: External APIC IRQ output control */
pci_write_config_byte(dev, 0x58, tmp);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
/*
* VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit.
* This leads to doubled level interrupt rates.
* Set this bit to get rid of cycle wastage.
* Otherwise uncritical.
*/
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8
pci_read_config_byte(dev, 0x5B, &misc_control2);
if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
/*
 * The AMD I/O APIC can hang the box when an APIC IRQ is masked.
 * We check all revs >= B0 (yet not the pre-production ones!) as the bug
 * is currently marked NoFix.
*
* We have multiple reports of hangs with this chipset that went away with
 * noapic specified. For the moment we assume it's the erratum. We may be wrong,
 * of course. However, the advice is demonstrably good even if so.
*/
static void quirk_amd_ioapic(struct pci_dev *dev)
{
if (dev->revision >= 0x02) {
dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
dev_warn(&dev->dev, " : booting with the \"noapic\" option\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
#endif /* CONFIG_X86_IO_APIC */
#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
{
/* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */
if (dev->subsystem_device == 0xa118)
dev->sriov->link = dev->devfn;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
#endif
/*
* Some settings of MMRBC can lead to data corruption so block changes.
* See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
*/
static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
if (dev->subordinate && dev->revision <= 0x12) {
dev_info(&dev->dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
dev->revision);
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
/*
* FIXME: it is questionable that quirk_via_acpi
* is needed. It shows up as an ISA bridge, and does not
* support the PCI_INTERRUPT_LINE register at all. Therefore
* it seems like setting the pci_dev's 'irq' to the
* value of the ACPI SCI interrupt is only done for convenience.
* -jgarzik
*/
static void quirk_via_acpi(struct pci_dev *d)
{
/*
* VIA ACPI device: SCI IRQ line in PCI config byte 0x42
*/
u8 irq;
pci_read_config_byte(d, 0x42, &irq);
irq &= 0xf;
if (irq && (irq != 2))
d->irq = irq;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
/*
* VIA bridges which have VLink
*/
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
static void quirk_via_bridge(struct pci_dev *dev)
{
/* See what bridge we have and find the device ranges */
switch (dev->device) {
case PCI_DEVICE_ID_VIA_82C686:
/* The VT82C686 is special, it attaches to PCI and can have
any device number. All its subdevices are functions of
that single device. */
via_vlink_dev_lo = PCI_SLOT(dev->devfn);
via_vlink_dev_hi = PCI_SLOT(dev->devfn);
break;
case PCI_DEVICE_ID_VIA_8237:
case PCI_DEVICE_ID_VIA_8237A:
via_vlink_dev_lo = 15;
break;
case PCI_DEVICE_ID_VIA_8235:
via_vlink_dev_lo = 16;
break;
case PCI_DEVICE_ID_VIA_8231:
case PCI_DEVICE_ID_VIA_8233_0:
case PCI_DEVICE_ID_VIA_8233A:
case PCI_DEVICE_ID_VIA_8233C_0:
via_vlink_dev_lo = 17;
break;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
/**
* quirk_via_vlink - VIA VLink IRQ number update
* @dev: PCI device
*
 * If the device we are dealing with is on a PIC IRQ, we need to
 * ensure that the IRQ line register, which usually is not relevant
 * for PCI cards, is actually written so that interrupts get sent
* to the right place.
* We only do this on systems where a VIA south bridge was detected,
* and only for VIA devices on the motherboard (see quirk_via_bridge
* above).
*/
static void quirk_via_vlink(struct pci_dev *dev)
{
u8 irq, new_irq;
/* Check if we have VLink at all */
if (via_vlink_dev_lo == -1)
return;
new_irq = dev->irq;
/* Don't quirk interrupts outside the legacy IRQ range */
if (!new_irq || new_irq > 15)
return;
/* Internal device ? */
if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
return;
/* This is an internal VLink device on a PIC interrupt. The BIOS
ought to have set this but may not have, so we redo it */
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
if (new_irq != irq) {
dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n",
irq, new_irq);
udelay(15); /* unknown if delay really needed */
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
/*
* VIA VT82C598 has its device ID settable and many BIOSes
* set it to the ID of VT82C597 for backward compatibility.
* We need to switch it off to be able to recognize the real
* type of the chip.
*/
static void quirk_vt82c598_id(struct pci_dev *dev)
{
pci_write_config_byte(dev, 0xfc, 0);
pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
/*
* CardBus controllers have a legacy base address that enables them
* to respond as i82365 pcmcia controllers. We don't want them to
* do this even if the Linux CardBus driver is not loaded, because
* the Linux i82365 driver does not (and should not) handle CardBus.
*/
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
/*
* Following the PCI ordering rules is optional on the AMD762. I'm not
* sure what the designers were smoking but let's not inhale...
*
* To be fair to AMD, it follows the spec by default, its BIOS people
* who turn it off!
*/
static void quirk_amd_ordering(struct pci_dev *dev)
{
u32 pcic;
pci_read_config_dword(dev, 0x4C, &pcic);
if ((pcic & 6) != 6) {
pcic |= 6;
dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
pci_write_config_dword(dev, 0x4C, pcic);
pci_read_config_dword(dev, 0x84, &pcic);
pcic |= (1 << 23); /* Required in this mode */
pci_write_config_dword(dev, 0x84, pcic);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
/*
* DreamWorks provided workaround for Dunord I-3000 problem
*
* This card decodes and responds to addresses not apparently
* assigned to it. We force a larger allocation to ensure that
* nothing gets put too close to it.
*/
static void quirk_dunord(struct pci_dev *dev)
{
struct resource *r = &dev->resource[1];
r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xffffff;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
/*
* i82380FB mobile docking controller: its PCI-to-PCI bridge
* is subtractive decoding (transparent), and does indicate this
* in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
* instead of 0x01.
*/
static void quirk_transparent_bridge(struct pci_dev *dev)
{
dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
/*
* Common misconfiguration of the MediaGX/Geode PCI master that will
* reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1
* datasheets found at http://www.national.com/analog for info on what
* these bits do. <christer@weinigel.se>
*/
static void quirk_mediagx_master(struct pci_dev *dev)
{
u8 reg;
	pci_read_config_byte(dev, 0x41, &reg);
if (reg & 2) {
reg &= ~2;
dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
reg);
pci_write_config_byte(dev, 0x41, reg);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
/*
 * Ensure C0 rev restreaming is off. This is normally done by
 * the BIOS, but in the odd case it is not, the result is corruption;
 * hence the presence of a Linux check.
*/
static void quirk_disable_pxb(struct pci_dev *pdev)
{
u16 config;
if (pdev->revision != 0x04) /* Only C0 requires this */
return;
pci_read_config_word(pdev, 0x40, &config);
if (config & (1<<6)) {
config &= ~(1<<6);
pci_write_config_word(pdev, 0x40, config);
dev_info(&pdev->dev, "C0 revision 450NX. Disabling PCI restreaming\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
/* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
u8 tmp;
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
if (tmp == 0x01) {
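		/*
		 * Bit 0 of config offset 0x40 appears to act as a write enable
		 * for the class code: set it, rewrite prog-if (0x09) to 0x01
		 * and sub-class (0x0a) to 0x06 (SATA/AHCI), then restore 0x40.
		 */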
pci_read_config_byte(pdev, 0x40, &tmp);
pci_write_config_byte(pdev, 0x40, tmp|1);
pci_write_config_byte(pdev, 0x9, 1);
pci_write_config_byte(pdev, 0xa, 6);
pci_write_config_byte(pdev, 0x40, tmp);
pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
dev_info(&pdev->dev, "set SATA to AHCI mode\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
/*
* Serverworks CSB5 IDE does not fully support native mode
*/
static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
u8 prog;
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
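	/* Bits 0 and 2 of the IDE prog-if select native mode for the primary
	   and secondary channels; clearing them forces legacy mode. */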
if (prog & 5) {
prog &= ~5;
pdev->class &= ~5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
/* PCI layer will sort out resources */
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
/*
* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
*/
static void quirk_ide_samemode(struct pci_dev *pdev)
{
u8 prog;
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n");
prog &= ~5;
pdev->class &= ~5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
/*
* Some ATA devices break if put into D3
*/
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
/* Quirk the legacy ATA devices only. The AHCI ones are ok */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* ALi loses some register settings that we cannot then restore */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
occur when mode detecting */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* This was originally an Alpha specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
*/
static void quirk_eisa_bridge(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
/*
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
* users to be irritated by just another PCI Device in the Win98 device
* manager. (see the file prog/hotplug/README.p4b in the lm_sensors
* package 2.7.0 for details)
*
* The SMBus PCI Device can be activated by setting a bit in the ICH LPC
* bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
* becomes necessary to do this tweak in two steps -- the chosen trigger
* is either the Host bridge (preferred) or on-board VGA controller.
*
* Note that we used to unhide the SMBus that way on Toshiba laptops
* (Satellite A40 and Tecra M2) but then found that the thermal management
* was done by SMM code, which could cause unsynchronized concurrent
* accesses to the SMBus registers, with potentially bad effects. Thus you
* should be very careful when adding new entries: if SMM is accessing the
* Intel SMBus, this is a very good reason to leave it hidden.
*
* Likewise, many recent laptops use ACPI for thermal management. If the
* ACPI DSDT code accesses the SMBus, then Linux should not access it
* natively, and keeping the SMBus hidden is the right thing to do. If you
* are about to add an entry in the table below, please first disassemble
* the DSDT and double-check that there is no code accessing the SMBus.
*/
static int asus_hides_smbus;
static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
switch (dev->subsystem_device) {
case 0x8025: /* P4B-LX */
case 0x8070: /* P4B */
case 0x8088: /* P4B533 */
case 0x1626: /* L3C notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
switch (dev->subsystem_device) {
case 0x80b1: /* P4GE-V */
case 0x80b2: /* P4PE */
case 0x8093: /* P4B533-V */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
switch (dev->subsystem_device) {
case 0x8030: /* P4T533 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
switch (dev->subsystem_device) {
case 0x8070: /* P4G8X Deluxe */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
switch (dev->subsystem_device) {
case 0x80c9: /* PU-DLS */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
case 0x1821: /* M5N notebook */
case 0x1897: /* A6L notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch (dev->subsystem_device) {
case 0x184b: /* W1N notebook */
case 0x186a: /* M6Ne notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
switch (dev->subsystem_device) {
case 0x80f2: /* P4P800-X */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
switch (dev->subsystem_device) {
case 0x1882: /* M6V notebook */
case 0x1977: /* A6VA notebook */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch (dev->subsystem_device) {
case 0x088C: /* HP Compaq nc8000 */
case 0x0890: /* HP Compaq nc6000 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
switch (dev->subsystem_device) {
case 0x12bc: /* HP D330L */
case 0x12bd: /* HP D530 */
case 0x006a: /* HP Compaq nx9500 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
switch (dev->subsystem_device) {
case 0x12bf: /* HP xw4100 */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch (dev->subsystem_device) {
case 0xC00C: /* Samsung P35 notebook */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch (dev->subsystem_device) {
case 0x0058: /* Compaq Evo N620c */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
switch (dev->subsystem_device) {
case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs, therefore checking
* its on-board VGA controller */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
switch (dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
case 0x00ba: /* Compaq Evo D510 USDT */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs and on-board VGA
* controller is disabled if an AGP card is
* inserted, therefore checking USB UHCI
* Controller #1 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
switch (dev->subsystem_device) {
case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
/* Motherboard doesn't have host bridge
* subvendor/subdevice IDs, therefore checking
* its on-board VGA controller */
asus_hides_smbus = 1;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
static void asus_hides_smbus_lpc(struct pci_dev *dev)
{
u16 val;
if (likely(!asus_hides_smbus))
return;
pci_read_config_word(dev, 0xF2, &val);
if (val & 0x8) {
pci_write_config_word(dev, 0xF2, val & (~0x8));
pci_read_config_word(dev, 0xF2, &val);
if (val & 0x8)
dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
val);
else
dev_info(&dev->dev, "Enabled i801 SMBus device\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
/* It appears we just have one such device. If not, we have a warning */
static void __iomem *asus_rcba_base;
static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
{
u32 rcba;
if (likely(!asus_hides_smbus))
return;
WARN_ON(asus_rcba_base);
pci_read_config_dword(dev, 0xF0, &rcba);
/* use bits 31:14, 16 kB aligned */
asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
if (asus_rcba_base == NULL)
return;
}
static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
u32 val;
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
/* read the Function Disable register, dword mode only */
val = readl(asus_rcba_base + 0x3418);
writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
}
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
iounmap(asus_rcba_base);
asus_rcba_base = NULL;
dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
}
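/*
 * At boot time we simply run the suspend/resume pair back to back: map the
 * RCBA, clear the SMBus function-disable bit, then unmap again.
 */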
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
asus_hides_smbus_lpc_ich6_suspend(dev);
asus_hides_smbus_lpc_ich6_resume_early(dev);
asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
/*
* SiS 96x south bridge: BIOS typically hides SMBus device...
*/
static void quirk_sis_96x_smbus(struct pci_dev *dev)
{
u8 val = 0;
pci_read_config_byte(dev, 0x77, &val);
if (val & 0x10) {
dev_info(&dev->dev, "Enabling SiS 96x SMBus\n");
pci_write_config_byte(dev, 0x77, val & ~0x10);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
/*
* ... This is further complicated by the fact that some SiS96x south
* bridges pretend to be 85C503/5513 instead. In that case see if we
* spotted a compatible north bridge to make sure.
* (pci_find_device doesn't work yet)
*
* We can also enable the sis96x bit in the discovery register..
*/
#define SIS_DETECT_REGISTER 0x40
static void quirk_sis_503(struct pci_dev *dev)
{
u8 reg;
u16 devid;
	pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
return;
}
/*
* Ok, it now shows up as a 96x.. run the 96x quirk by
* hand in case it has already been processed.
* (depends on link order, which is apparently not guaranteed)
*/
dev->device = devid;
quirk_sis_96x_smbus(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
/*
* On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
* and MC97 modem controller are disabled when a second PCI soundcard is
* present. This patch, tweaking the VT8237 ISA bridge, enables them.
* -- bjd
*/
static void asus_hides_ac97_lpc(struct pci_dev *dev)
{
u8 val;
int asus_hides_ac97 = 0;
if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_VIA_8237)
asus_hides_ac97 = 1;
}
if (!asus_hides_ac97)
return;
pci_read_config_byte(dev, 0x50, &val);
if (val & 0xc0) {
pci_write_config_byte(dev, 0x50, val & (~0xc0));
pci_read_config_byte(dev, 0x50, &val);
if (val & 0xc0)
dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
val);
else
dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
/*
* If we are using libata we can drive this chip properly but must
* do this early on to make the additional device appear during
* the PCI scanning.
*/
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
u32 conf1, conf5, class;
u8 hdr;
/* Only poke fn 0 */
if (PCI_FUNC(pdev->devfn))
return;
pci_read_config_dword(pdev, 0x40, &conf1);
pci_read_config_dword(pdev, 0x80, &conf5);
conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */
conf5 &= ~(1 << 24); /* Clear bit 24 */
switch (pdev->device) {
case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */
/* The controller should be in single function ahci mode */
conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
break;
case PCI_DEVICE_ID_JMICRON_JMB365:
case PCI_DEVICE_ID_JMICRON_JMB366:
/* Redirect IDE second PATA port to the right spot */
conf5 |= (1 << 24);
/* Fall through */
case PCI_DEVICE_ID_JMICRON_JMB361:
case PCI_DEVICE_ID_JMICRON_JMB363:
case PCI_DEVICE_ID_JMICRON_JMB369:
/* Enable dual function mode, AHCI on fn 0, IDE fn1 */
/* Set the class codes correctly and then direct IDE 0 */
conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */
break;
case PCI_DEVICE_ID_JMICRON_JMB368:
/* The controller should be in single function IDE mode */
conf1 |= 0x00C00000; /* Set 22, 23 */
break;
}
pci_write_config_dword(pdev, 0x40, conf1);
pci_write_config_dword(pdev, 0x80, conf5);
/* Update pdev accordingly */
pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
pdev->hdr_type = hdr & 0x7f;
pdev->multifunction = !!(hdr & 0x80);
pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
#endif
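/*
 * JMicron chips expose AHCI and IDE as separate PCI functions with a
 * power-on ordering dependency between them, so keep their suspend/resume
 * strictly ordered by disabling async suspend.
 */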
static void quirk_jmicron_async_suspend(struct pci_dev *dev)
{
if (dev->multifunction) {
device_disable_async_suspend(&dev->dev);
dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
#ifdef CONFIG_X86_IO_APIC
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
int i;
if ((pdev->class >> 8) != 0xff00)
return;
/* the first BAR is the location of the IO APIC...we must
* not touch this (and it's already covered by the fixmap), so
* forcibly insert it into the resource tree */
if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
insert_resource(&iomem_resource, &pdev->resource[0]);
/* The next five BARs all seem to be rubbish, so just clean
* them out */
for (i = 1; i < 6; i++)
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
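/* Disable MSI on the Intel E7520/E7320/E7525 memory controller hubs. */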
static void quirk_pcie_mch(struct pci_dev *pdev)
{
pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
/*
* It's possible for the MSI to get corrupted if shpc and acpi
* are used together on certain PXH-based systems.
*/
static void quirk_pcie_pxh(struct pci_dev *dev)
{
dev->no_msi = 1;
dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
/*
* Some Intel PCI Express chipsets have trouble with downstream
* device power management.
*/
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
pci_pm_d3_delay = 120;
dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
#ifdef CONFIG_X86_IO_APIC
/*
* Boot interrupts on some chipsets cannot be turned off. For these chipsets,
* remap the original interrupt in the linux kernel to the boot interrupt, so
* that a PCI device's interrupt handler is installed on the boot interrupt
* line instead.
*/
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
if (noioapicquirk || noioapicreroute)
return;
dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
/*
* On some chipsets we can disable the generation of legacy INTx boot
* interrupts.
*/
/*
* IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no.
* 300641-004US, section 5.7.3.
*/
#define INTEL_6300_IOAPIC_ABAR 0x40
#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
u16 pci_config_word;
if (noioapicquirk)
return;
pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
/*
* disable boot interrupts on HT-1000
*/
#define BC_HT1000_FEATURE_REG 0x64
#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
#define BC_HT1000_MAP_IDX 0xC00
#define BC_HT1000_MAP_DATA 0xC01
static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
{
u32 pci_config_dword;
u8 irq;
if (noioapicquirk)
return;
pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
BC_HT1000_PIC_REGS_ENABLE);
for (irq = 0x10; irq < 0x10 + 32; irq++) {
outb(irq, BC_HT1000_MAP_IDX);
outb(0x00, BC_HT1000_MAP_DATA);
}
pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
/*
* disable boot interrupts on AMD and ATI chipsets
*/
/*
* NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
* rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
* (due to an erratum).
*/
#define AMD_813X_MISC 0x40
#define AMD_813X_NOIOAMODE (1<<0)
#define AMD_813X_REV_B1 0x12
#define AMD_813X_REV_B2 0x13
static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
{
u32 pci_config_dword;
if (noioapicquirk)
return;
if ((dev->revision == AMD_813X_REV_B1) ||
(dev->revision == AMD_813X_REV_B2))
return;
pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
pci_config_dword &= ~AMD_813X_NOIOAMODE;
pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
#define AMD_8111_PCI_IRQ_ROUTING 0x56
static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
{
u16 pci_config_word;
if (noioapicquirk)
return;
pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
if (!pci_config_word) {
dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] already disabled\n",
dev->vendor, dev->device);
return;
}
pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
#endif /* CONFIG_X86_IO_APIC */
/*
* Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
* but the PIO transfers won't work if BAR0 falls at the odd 8 bytes.
* Re-allocate the region if needed...
*/
static void quirk_tc86c001_ide(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
if (r->start & 0x8) {
r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xf;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
quirk_tc86c001_ide);
/*
* The PLX PCI 9050 PCI Target bridge controller has an erratum that prevents
* the local configuration registers, accessible via BAR0 (memory) or BAR1
* (I/O), from being read correctly if bit 7 of the base address is set.
* The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
* Re-allocate the regions to a 256-byte boundary if necessary.
*/
static void quirk_plx_pci9050(struct pci_dev *dev)
{
unsigned int bar;
/* Fixed in revision 2 (PCI 9052). */
if (dev->revision >= 2)
return;
for (bar = 0; bar <= 1; bar++)
if (pci_resource_len(dev, bar) == 0x80 &&
(pci_resource_start(dev, bar) & 0x80)) {
struct resource *r = &dev->resource[bar];
dev_info(&dev->dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
bar);
r->flags |= IORESOURCE_UNSET;
r->start = 0;
r->end = 0xff;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
quirk_plx_pci9050);
/*
* The following Meilhaus (vendor ID 0x1402) device IDs (amongst others)
* may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b,
* 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c,
* 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b.
*
* Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq"
* driver.
*/
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
static void quirk_netmos(struct pci_dev *dev)
{
unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
unsigned int num_serial = dev->subsystem_device & 0xf;
/*
* These Netmos parts are multiport serial devices with optional
* parallel ports. Even when parallel ports are present, they
* are identified as class SERIAL, which means the serial driver
* will claim them. To prevent this, mark them as class OTHER.
* These combo devices should be claimed by parport_serial.
*
* The subdevice ID is of the form 0x00PS, where <P> is the number
* of parallel ports and <S> is the number of serial ports.
*/
switch (dev->device) {
case PCI_DEVICE_ID_NETMOS_9835:
/* Well, this rule doesn't hold for the following 9835 device */
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return;
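/* Fall through */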
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
case PCI_DEVICE_ID_NETMOS_9855:
if (num_parallel) {
dev_info(&dev->dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
dev->device, num_parallel, num_serial);
dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
(dev->class & 0xff);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
/*
* Quirk non-zero PCI functions to route VPD access through function 0 for
* devices that share VPD resources between functions. The functions are
* expected to be identical devices.
*/
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
struct pci_dev *f0;
if (!PCI_FUNC(dev->devfn))
return;
f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
if (!f0)
return;
if (f0->vpd && dev->class == f0->class &&
dev->vendor == f0->vendor && dev->device == f0->device)
dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
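/*
 * Disable e100 interrupts that firmware may have left enabled before the
 * driver attaches (see the comments in the function body).
 */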
static void quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command, pmcsr;
u8 __iomem *csr;
u8 cmd_hi;
switch (dev->device) {
/* PCI IDs taken from drivers/net/e100.c */
case 0x1029:
case 0x1030 ... 0x1034:
case 0x1038 ... 0x103E:
case 0x1050 ... 0x1057:
case 0x1059:
case 0x1064 ... 0x106B:
case 0x1091 ... 0x1095:
case 0x1209:
case 0x1229:
case 0x2449:
case 0x2459:
case 0x245D:
case 0x27DC:
break;
default:
return;
}
/*
* Some firmware hands off the e100 with interrupts enabled,
* which can cause a flood of interrupts if packets are
* received before the driver attaches to the device. So
* disable all e100 interrupts here. The driver will
* re-enable them when it's ready.
*/
pci_read_config_word(dev, PCI_COMMAND, &command);
if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
return;
/*
* Check that the device is in the D0 power state. If it's not,
* there is no point in looking any further.
*/
if (dev->pm_cap) {
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
return;
}
/* Convert from PCI bus to resource space. */
csr = ioremap(pci_resource_start(dev, 0), 8);
if (!csr) {
dev_warn(&dev->dev, "Can't map e100 registers\n");
return;
}
cmd_hi = readb(csr + 3);
if (cmd_hi == 0) {
dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; disabling\n");
writeb(1, csr + 3);
}
iounmap(csr);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
/*
* The 82575 and 82598 may experience data corruption issues when transitioning
* out of L0s. To prevent this, we need to disable L0s on the PCIe link.
*/
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
dev_info(&dev->dev, "Disabling L0s\n");
pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
static void fixup_rev1_53c810(struct pci_dev *dev)
{
u32 class = dev->class;
/*
* rev 1 ncr53c810 chips don't set the class at all which means
* they don't get their resources remapped. Fix that here.
*/
if (class)
return;
dev->class = PCI_CLASS_STORAGE_SCSI << 8;
dev_info(&dev->dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
class, dev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
/* Enable 1k I/O space granularity on the Intel P64H2 */
static void quirk_p64h2_1k_io(struct pci_dev *dev)
{
u16 en1k;
pci_read_config_word(dev, 0x40, &en1k);
if (en1k & 0x200) {
dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n");
dev->io_window_1k = 1;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
/* Under some circumstances, AER is not linked with extended capabilities.
* Force it to be linked by setting the corresponding control bit in the
* config space.
*/
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
uint8_t b;
if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
if (!(b & 0x20)) {
pci_write_config_byte(dev, 0xf41, b | 0x20);
dev_info(&dev->dev, "Linking AER extended capability\n");
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
/*
* Disable PCI Bus Parking and PCI Master read caching on CX700
* which causes unspecified timing errors with a VT6212L on the PCI
* bus leading to USB2.0 packet loss.
*
* This quirk is only enabled if a second (on the external PCI bus)
* VT6212L is found -- the CX700 core itself also contains a USB
* host controller with the same PCI ID as the VT6212L.
*/
/* Count VT6212L instances */
struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
uint8_t b;
/* p should contain the first (internal) VT6212L -- see if we have
an external one by searching again */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
if (!p)
return;
pci_dev_put(p);
if (pci_read_config_byte(dev, 0x76, &b) == 0) {
if (b & 0x40) {
/* Turn off PCI Bus Parking */
pci_write_config_byte(dev, 0x76, b ^ 0x40);
dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n");
}
}
if (pci_read_config_byte(dev, 0x72, &b) == 0) {
if (b != 0) {
/* Turn off PCI Master read caching */
pci_write_config_byte(dev, 0x72, 0x0);
/* Set PCI Master Bus time-out to "1x16 PCLK" */
pci_write_config_byte(dev, 0x75, 0x1);
/* Disable "Read FIFO Timer" */
pci_write_config_byte(dev, 0x77, 0x0);
dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n");
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
/*
* If a device follows the VPD format spec, the PCI core will not read or
* write past the VPD End Tag. But some vendors do not follow the VPD
* format spec, so we can't tell how much data is safe to access. Devices
* may behave unpredictably if we access too much. Blacklist these devices
* so we don't touch VPD at all.
*/
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
if (dev->vpd) {
dev->vpd->len = 0;
dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
quirk_blacklist_vpd);
/*
* For Broadcom 5706, 5708, and 5709 rev. A NICs, any read beyond the
* VPD end tag will hang the device. This problem was initially
* observed when a VPD entry was created in sysfs
* ('/sys/bus/pci/devices/<id>/vpd'). A read of this sysfs entry
* will dump 32k of data. Reading a full 32k will cause an access
* beyond the VPD end tag, causing the device to hang. Once the device
* is hung, the bnx2 driver will not be able to reset the device.
* We believe that it is legal to read beyond the end tag and
* therefore the solution is to limit the read/write length.
*/
static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
{
/*
* Only disable the VPD capability for 5706, 5706S, 5708,
* 5708S and 5709 rev. A
*/
if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
(dev->device == PCI_DEVICE_ID_NX2_5706S) ||
(dev->device == PCI_DEVICE_ID_NX2_5708) ||
(dev->device == PCI_DEVICE_ID_NX2_5708S) ||
((dev->device == PCI_DEVICE_ID_NX2_5709) &&
(dev->revision & 0xf0) == 0x0)) {
if (dev->vpd)
dev->vpd->len = 0x80;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5706,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5706S,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5708,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5708S,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
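/* Broadcom 5719 rev A0: cap the PCIe Max Read Request Size at 2048 bytes. */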
static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
{
u32 rev;
pci_read_config_dword(dev, 0xf4, &rev);
/* Only CAP the MRRS if the device is a 5719 A0 */
if (rev == 0x05719000) {
int readrq = pcie_get_readrq(dev);
if (readrq > 2048)
pcie_set_readrq(dev, 2048);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5719,
quirk_brcm_5719_limit_mrrs);
/* Originally in EDAC sources for i82875P:
* Intel tells BIOS developers to hide device 6 which
* configures the overflow device access containing
* the DRBs - this is where we expose device 6.
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
*/
static void quirk_unhide_mch_dev6(struct pci_dev *dev)
{
u8 reg;
if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
pci_write_config_byte(dev, 0xF4, reg | 0x02);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
quirk_unhide_mch_dev6);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
#ifdef CONFIG_TILEPRO
/*
* The Tilera TILEmpower tilepro platform needs to set the link speed
* to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed
* setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe
* capability register of the PEX8624 PCIe switch. The switch
* supports link speed auto negotiation, but falsely sets
* the link speed to 5GT/s.
*/
static void quirk_tile_plx_gen1(struct pci_dev *dev)
{
if (tile_plx_gen1) {
pci_write_config_dword(dev, 0x98, 0x1);
mdelay(50);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
#endif /* CONFIG_TILEPRO */
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
* some other buses controlled by the chipset even if Linux is not
* aware of it. Instead of setting the flag on all buses in the
* machine, simply disable MSI globally.
*/
static void quirk_disable_all_msi(struct pci_dev *dev)
{
pci_no_msi();
dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
{
if (dev->subordinate) {
dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
/*
* The APC bridge device in AMD 780 family northbridges has some random
* OEM subsystem ID in its vendor ID register (erratum 18), so instead
* we use the possible vendor/device IDs of the host bridge for the
* declared quirk, and search for the APC bridge by slot number.
*/
static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
{
struct pci_dev *apc_bridge;
apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
if (apc_bridge) {
if (apc_bridge->device == 0x9602)
quirk_disable_msi(apc_bridge);
pci_dev_put(apc_bridge);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
dev_info(&dev->dev, "Found %s HT MSI Mapping\n",
flags & HT_MSI_FLAGS_ENABLE ?
"enabled" : "disabled");
return (flags & HT_MSI_FLAGS_ENABLE) != 0;
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
return 0;
}
/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
quirk_msi_ht_cap);
/* The nVidia CK804 chipset may have 2 HT MSI mappings.
* MSI is supported if the MSI capability is set in any of these mappings.
*/
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
struct pci_dev *pdev;
if (!dev->subordinate)
return;
/* Check the HT MSI cap on this chipset and the root one;
* a single one having the mapping enabled is enough to be sure that MSI is supported.
*/
pdev = pci_get_slot(dev->bus, 0);
if (!pdev)
return;
if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
pci_dev_put(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_msi_ht_cap);
/* Force enable MSI mapping capability on HT bridges */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
flags | HT_MSI_FLAGS_ENABLE);
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
ht_enable_msi_mapping);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);
/* The P5N32-SLI motherboards from Asus have a problem with MSI
* for the MCP55 NIC. It is not yet determined whether the MSI problem
* also affects other devices. For now, turn off MSI for this device.
*/
static void nvenet_msi_disable(struct pci_dev *dev)
{
const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
if (board_name &&
(strstr(board_name, "P5N32-SLI PREMIUM") ||
strstr(board_name, "P5N32-E SLI"))) {
dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NVENET_15,
nvenet_msi_disable);
/*
* Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
* config register. This register controls the routing of legacy
* interrupts from devices that route through the MCP55. If this register
* is misprogrammed, interrupts are only sent to the BSP, unlike
* conventional systems where the IRQ is broadcast to all online CPUs. Not
* having this register set properly prevents kdump from booting up
* properly, so let's make sure that we have it set correctly.
* Note that this is an undocumented register.
*/
static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
{
u32 cfg;
if (!pci_find_capability(dev, PCI_CAP_ID_HT))
return;
pci_read_config_dword(dev, 0x74, &cfg);
if (cfg & ((1 << 2) | (1 << 15))) {
printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
cfg &= ~((1 << 2) | (1 << 15));
pci_write_config_dword(dev, 0x74, cfg);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
nvbridge_check_legacy_irq_routing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
nvbridge_check_legacy_irq_routing);
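/*
 * Returns 0 if the device has no HT MSI mapping capability, 1 if a mapping
 * is present but not enabled, and 2 if an enabled mapping is found.
 */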
static int ht_check_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = PCI_FIND_CAP_TTL;
int found = 0;
/* check if there is HT MSI cap or enabled on this device */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (found < 1)
found = 1;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
if (flags & HT_MSI_FLAGS_ENABLE) {
if (found < 2) {
found = 2;
break;
}
}
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
return found;
}
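/*
 * Scan the slots after this host bridge (up to the next host bridge) and
 * return 1 if any of them carries an HT MSI mapping capability.
 */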
static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
struct pci_dev *dev;
int pos;
int i, dev_no;
int found = 0;
dev_no = host_bridge->devfn >> 3;
for (i = dev_no + 1; i < 0x20; i++) {
dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
if (!dev)
continue;
/* found the next host bridge? */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
if (pos != 0) {
pci_dev_put(dev);
break;
}
if (ht_check_msi_mapping(dev)) {
found = 1;
pci_dev_put(dev);
break;
}
pci_dev_put(dev);
}
return found;
}
#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control 0 */
#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control 1 */
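/* Check the End of Chain bit in the HT slave link control register. */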
static int is_end_of_ht_chain(struct pci_dev *dev)
{
int pos, ctrl_off;
int end = 0;
u16 flags, ctrl;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
if (!pos)
goto out;
pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
ctrl_off = ((flags >> 10) & 1) ?
PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
pci_read_config_word(dev, pos + ctrl_off, &ctrl);
if (ctrl & (1 << 6))
end = 1;
out:
return end;
}
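/*
 * Walk back from this device to its host bridge and enable the device's HT
 * MSI mapping unless the bridge already provides an enabled mapping.
 */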
static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
struct pci_dev *host_bridge;
int pos;
int i, dev_no;
int found = 0;
dev_no = dev->devfn >> 3;
for (i = dev_no; i >= 0; i--) {
host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
if (!host_bridge)
continue;
pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
if (pos != 0) {
found = 1;
break;
}
pci_dev_put(host_bridge);
}
if (!found)
return;
/* don't enable end_device/host_bridge with leaf directly here */
if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
host_bridge_with_leaf(host_bridge))
goto out;
/* the host bridge already has HT MSI mapping enabled */
if (msi_ht_cap_enabled(host_bridge))
goto out;
ht_enable_msi_mapping(dev);
out:
pci_dev_put(host_bridge);
}
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
dev_info(&dev->dev, "Disabling HT MSI Mapping\n");
pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
flags & ~HT_MSI_FLAGS_ENABLE);
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
}
static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
struct pci_dev *host_bridge;
int pos;
int found;
if (!pci_msi_enabled())
return;
/* check if there is HT MSI cap or enabled on this device */
found = ht_check_msi_mapping(dev);
/* no HT MSI CAP */
if (found == 0)
return;
/*
* HT MSI mapping should be disabled on devices that are below
* a non-Hypertransport host bridge. Locate the host bridge...
*/
host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (host_bridge == NULL) {
dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
return;
}
pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
if (pos != 0) {
/* Host bridge is to HT */
if (found == 1) {
/* it is not enabled, try to enable it */
if (all)
ht_enable_msi_mapping(dev);
else
nv_ht_enable_msi_mapping(dev);
}
goto out;
}
/* HT MSI is not enabled */
if (found == 1)
goto out;
/* Host bridge is not to HT, disable HT MSI mapping on this device */
ht_disable_msi_mapping(dev);
out:
pci_dev_put(host_bridge);
}
static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 1);
}
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
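/*
 * On the devices below, MSI does not work while the command register's
 * INTx disable bit is set, so flag them and let the MSI code leave that
 * bit alone.
 */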
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
struct pci_dev *p;
/* The SB700 MSI issue is fixed at the HW level from revision A21 onwards;
* we need to check the PCI revision ID of the SMBus controller to get the
* SB700 revision.
*/
p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
NULL);
if (!p)
return;
if ((p->revision < 0x3B) && (p->revision >= 0x30))
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
pci_dev_put(p);
}
static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
{
/* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
if (dev->revision < 0x18) {
dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n");
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5780,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5780S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5714,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5714S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5715,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5715S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
quirk_msi_intx_disable_qca_bug);
#endif /* CONFIG_PCI_MSI */
/* Allow manual resource allocation for PCI hotplug bridges
* via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
* some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
* the kernel fails to allocate resources when a hotplug device is
* inserted and the PCI bus is rescanned.
*/
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
dev->is_hotplug_bridge = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
/*
* This is a quirk for the Ricoh MMC controller found as a part of
* some multifunction chips.
* This is very similar and based on the ricoh_mmc driver written by
* Philip Langdale. Thank you for these magic sequences.
*
* These chips implement the four main memory card controllers (SD, MMC, MS, xD)
* and one or both of cardbus or firewire.
*
* It happens that they implement SD and MMC
* support as separate controllers (and PCI functions). The linux SDHCI
* driver supports MMC cards but the chip detects MMC cards in hardware
* and directs them to the MMC controller - so the SDHCI driver never sees
* them.
*
* To get around this, we must disable the useless MMC controller.
* At that point, the SDHCI controller will start seeing them.
* It seems to be the case that the relevant PCI registers to deactivate the
* MMC controller live on PCI function 0, which might be the CardBus controller
* or the FireWire controller, depending on the particular chip in question.
*
* This has to be done early, because as soon as we disable the MMC controller
* other pci functions shift up one level, e.g. function #2 becomes function
* #1, and this will confuse the pci core.
*/
#ifdef CONFIG_MMC_RICOH_MMC
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
/* disable via cardbus interface */
u8 write_enable;
u8 write_target;
u8 disable;
/* disable must be done via function #0 */
if (PCI_FUNC(dev->devfn))
return;
pci_read_config_byte(dev, 0xB7, &disable);
if (disable & 0x02)
return;
pci_read_config_byte(dev, 0x8E, &write_enable);
pci_write_config_byte(dev, 0x8E, 0xAA);
pci_read_config_byte(dev, 0x8D, &write_target);
pci_write_config_byte(dev, 0x8D, 0xB7);
pci_write_config_byte(dev, 0xB7, disable | 0x02);
pci_write_config_byte(dev, 0x8E, write_enable);
pci_write_config_byte(dev, 0x8D, write_target);
dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
/* disable via firewire interface */
u8 write_enable;
u8 disable;
/* disable must be done via function #0 */
if (PCI_FUNC(dev->devfn))
return;
/*
* RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
* certain types of SD/MMC cards. Lowering the SD base
* clock frequency from 200MHz to 50MHz fixes this issue.
*
* 0x150 - SD2.0 mode enable for changing base clock
* frequency to 50MHz
* 0xe1 - Base clock frequency
* 0x32 - 50MHz new clock frequency
* 0xf9 - Key register for 0x150
* 0xfc - key register for 0xe1
*/
if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
pci_write_config_byte(dev, 0xf9, 0xfc);
pci_write_config_byte(dev, 0x150, 0x10);
pci_write_config_byte(dev, 0xf9, 0x00);
pci_write_config_byte(dev, 0xfc, 0x01);
pci_write_config_byte(dev, 0xe1, 0x32);
pci_write_config_byte(dev, 0xfc, 0x00);
dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
}
pci_read_config_byte(dev, 0xCB, &disable);
if (disable & 0x02)
return;
pci_read_config_byte(dev, 0xCA, &write_enable);
pci_write_config_byte(dev, 0xCA, 0x57);
pci_write_config_byte(dev, 0xCB, disable | 0x02);
pci_write_config_byte(dev, 0xCA, write_enable);
dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
#ifdef CONFIG_DMAR_TABLE
#define VTUNCERRMSK_REG 0x1ac
#define VTD_MSK_SPEC_ERRORS (1 << 31)
/*
* This is a quirk for masking vt-d spec defined errors to platform error
* handling logic. Without this, platforms using Intel 7500, 5500 chipsets
* (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
* on the RAS config settings of the platform) when a vt-d fault happens.
* The resulting SMI caused the system to hang.
*
* VT-d spec related errors are already handled by the VT-d OS code, so no
* need to report the same error through other channels.
*/
static void vtd_mask_spec_errors(struct pci_dev *dev)
{
u32 word;
pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
#endif
static void fixup_ti816x_class(struct pci_dev *dev)
{
u32 class = dev->class;
/* TI 816x devices do not have class code set when in PCIe boot mode */
dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
class, dev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
/* Some PCIe devices do not work reliably with the claimed maximum
* payload size supported.
*/
static void fixup_mpss_256(struct pci_dev *dev)
{
dev->pcie_mpss = 1; /* 256 bytes */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
/* Intel 5000 and 5100 memory controllers have an erratum with read completion
* coalescing (which is enabled by default on some BIOSes) and an MPS of 256B.
* Since there is no way of knowing what the PCIe MPS on each fabric will be
* until all of the devices are discovered and buses walked, read completion
* coalescing must be disabled. Unfortunately, it cannot be re-enabled because
* it is possible to hotplug a device with MPS of 256B.
*/
static void quirk_intel_mc_errata(struct pci_dev *dev)
{
int err;
u16 rcc;
if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
pcie_bus_config == PCIE_BUS_DEFAULT)
return;
/* The Intel erratum specifies bits to change but does not say what they are.
* Keeping them magical until such time as the registers and values can
* be explained.
*/
err = pci_read_config_word(dev, 0x48, &rcc);
if (err) {
dev_err(&dev->dev, "Error attempting to read the read completion coalescing register\n");
return;
}
if (!(rcc & (1 << 10)))
return;
rcc &= ~(1 << 10);
err = pci_write_config_word(dev, 0x48, rcc);
if (err) {
dev_err(&dev->dev, "Error attempting to write the read completion coalescing register\n");
return;
}
pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
}
/* Intel 5000 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
/* Intel 5100 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
/*
* Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. To
* work around this, query the device for the size the BAR should be configured
* to, and adjust the resource end to match.
*/
static void quirk_intel_ntb(struct pci_dev *dev)
{
int rc;
u8 val;
rc = pci_read_config_byte(dev, 0x00D0, &val);
if (rc)
return;
dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
rc = pci_read_config_byte(dev, 0x00D1, &val);
if (rc)
return;
dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
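/* With initcall_debug set, log each fixup call and report its duration. */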
static ktime_t fixup_debug_start(struct pci_dev *dev,
void (*fn)(struct pci_dev *dev))
{
ktime_t calltime = 0;
dev_dbg(&dev->dev, "calling %pF\n", fn);
if (initcall_debug) {
pr_debug("calling %pF @ %i for %s\n",
fn, task_pid_nr(current), dev_name(&dev->dev));
calltime = ktime_get();
}
return calltime;
}
static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
void (*fn)(struct pci_dev *dev))
{
ktime_t delta, rettime;
unsigned long long duration;
if (initcall_debug) {
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
pr_debug("pci fixup %pF returned after %lld usecs for %s\n",
fn, duration, dev_name(&dev->dev));
}
}
/*
* Some BIOS implementations leave the Intel GPU interrupts enabled,
* even though no one is handling them (e.g. the i915 driver is never loaded).
* Additionally the interrupt destination is not set up properly
* and the interrupt ends up -somewhere-.
*
* These spurious interrupts are "sticky" and the kernel disables
* the (shared) interrupt line after 100,000+ generated interrupts.
*
* Fix it by disabling the still enabled interrupts.
* This resolves crashes often seen on monitor unplug.
*/
#define I915_DEIER_REG 0x4400c
static void disable_igfx_irq(struct pci_dev *dev)
{
void __iomem *regs = pci_iomap(dev, 0, 0);
if (regs == NULL) {
dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
return;
}
/* Check if any interrupt line is still enabled */
if (readl(regs + I915_DEIER_REG) != 0) {
dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
writel(0, regs + I915_DEIER_REG);
}
pci_iounmap(dev, regs);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
/*
* PCI devices which are on Intel chips can skip the 10ms delay
* before entering D3 mode.
*/
static void quirk_remove_d3_delay(struct pci_dev *dev)
{
dev->d3_delay = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
/* Intel Cherrytrail devices do not need 10ms d3_delay */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
/*
* Some devices may pass our check in pci_intx_mask_supported() if
 * PCI_COMMAND_INTX_DISABLE works, even though they actually do not properly
* support this feature.
*/
static void quirk_broken_intx_masking(struct pci_dev *dev)
{
dev->broken_intx_masking = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
quirk_broken_intx_masking);
/*
* Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
* Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
*
* RTL8110SC - Fails under PCI device assignment using DisINTx masking.
*/
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
quirk_broken_intx_masking);
/*
* Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
* DisINTx can be set but the interrupt status bit is non-functional.
*/
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
quirk_broken_intx_masking);
static u16 mellanox_broken_intx_devs[] = {
PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
PCI_DEVICE_ID_MELLANOX_HERMON_EN,
PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
PCI_DEVICE_ID_MELLANOX_CONNECTX2,
PCI_DEVICE_ID_MELLANOX_CONNECTX3,
PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
};
#define CONNECTX_4_CURR_MAX_MINOR 99
#define CONNECTX_4_INTX_SUPPORT_MINOR 14
/*
* Check ConnectX-4/LX FW version to see if it supports legacy interrupts.
* If so, don't mark it as broken.
* FW minor > 99 means older FW version format and no INTx masking support.
* FW minor < 14 means new FW version format and no INTx masking support.
*/
static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
{
__be32 __iomem *fw_ver;
u16 fw_major;
u16 fw_minor;
u16 fw_subminor;
u32 fw_maj_min;
u32 fw_sub_min;
int i;
for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
if (pdev->device == mellanox_broken_intx_devs[i]) {
pdev->broken_intx_masking = 1;
return;
}
}
/* Getting here means Connect-IB cards and up. Connect-IB has no INTx
* support so shouldn't be checked further
*/
if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
return;
if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
return;
/* For ConnectX-4 and ConnectX-4LX, need to check FW support */
if (pci_enable_device_mem(pdev)) {
dev_warn(&pdev->dev, "Can't enable device memory\n");
return;
}
fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
if (!fw_ver) {
dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n");
goto out;
}
/* Reading from resource space should be 32b aligned */
fw_maj_min = ioread32be(fw_ver);
fw_sub_min = ioread32be(fw_ver + 1);
fw_major = fw_maj_min & 0xffff;
fw_minor = fw_maj_min >> 16;
fw_subminor = fw_sub_min & 0xffff;
if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
fw_major, fw_minor, fw_subminor, pdev->device ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
pdev->broken_intx_masking = 1;
}
iounmap(fw_ver);
out:
pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
mellanox_check_broken_intx_masking);
static void quirk_no_bus_reset(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
}
/*
 * Some Atheros AR9xxx and QCA988x chips do not behave well after a bus reset.
 * The device will throw a Link Down error on AER-capable systems and,
 * regardless of AER, the device's config space is never accessible again;
 * attempting to access it typically hangs or resets the system.
* http://www.spinics.net/lists/linux-pci/msg34797.html
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
/*
* We can't do a bus reset on root bus devices, but an ineffective
* PM reset may be better than nothing.
*/
if (!pci_is_root_bus(dev->bus))
dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
}
/*
 * Some AMD/ATI GPUs (HD8570 - Oland) report that a D3hot->D0 transition
* causes a reset (i.e., they advertise NoSoftRst-). This transition seems
* to have no effect on the device: it retains the framebuffer contents and
* monitor sync. Advertising this support makes other layers, like VFIO,
* assume pci_reset_function() is viable for this device. Mark it as
* unavailable to skip it when testing reset methods.
*/
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
/*
* Thunderbolt controllers with broken MSI hotplug signaling:
* Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
* of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge).
*/
static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
{
if (pdev->is_hotplug_bridge &&
(pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
pdev->revision <= 1))
pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_thunderbolt_hotplug_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
quirk_thunderbolt_hotplug_msi);
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
pci_set_vpd_size(dev, 8192);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
*
* On Apple hardware the Cactus Ridge Thunderbolt controller needs to be
* shutdown before suspend. Otherwise the native host interface (NHI) will not
* be present after resume if a device was plugged in before suspend.
*
* The thunderbolt controller consists of a pcie switch with downstream
* bridges leading to the NHI and to the tunnel pci bridges.
*
* This quirk cuts power to the whole chip. Therefore we have to apply it
* during suspend_noirq of the upstream bridge.
*
* Power is automagically restored before resume. No action is needed.
*/
static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
{
acpi_handle bridge, SXIO, SXFP, SXLV;
if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
return;
bridge = ACPI_HANDLE(&dev->dev);
if (!bridge)
return;
/*
* SXIO and SXLV are present only on machines requiring this quirk.
* TB bridges in external devices might have the same device id as those
* on the host, but they will not have the associated ACPI methods. This
* implicitly checks that we are at the right bridge.
*/
if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
|| ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
|| ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
return;
dev_info(&dev->dev, "quirk: cutting power to thunderbolt controller...\n");
/* magic sequence */
acpi_execute_simple_method(SXIO, NULL, 1);
acpi_execute_simple_method(SXFP, NULL, 0);
msleep(300);
acpi_execute_simple_method(SXLV, NULL, 0);
acpi_execute_simple_method(SXIO, NULL, 0);
acpi_execute_simple_method(SXLV, NULL, 0);
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_poweroff_thunderbolt);
/*
* Apple: Wait for the thunderbolt controller to reestablish pci tunnels.
*
* During suspend the thunderbolt controller is reset and all pci
* tunnels are lost. The NHI driver will try to reestablish all tunnels
* during resume. We have to manually wait for the NHI since there is
* no parent child relationship between the NHI and the tunneled
* bridges.
*/
static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
{
struct pci_dev *sibling = NULL;
struct pci_dev *nhi = NULL;
if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
return;
/*
* Find the NHI and confirm that we are a bridge on the tb host
* controller and not on a tb endpoint.
*/
sibling = pci_get_slot(dev->bus, 0x0);
if (sibling == dev)
goto out; /* we are the downstream bridge to the NHI */
if (!sibling || !sibling->subordinate)
goto out;
nhi = pci_get_slot(sibling->subordinate, 0x0);
if (!nhi)
goto out;
if (nhi->vendor != PCI_VENDOR_ID_INTEL
|| (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
|| nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
goto out;
dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
device_pm_wait_for_dev(&dev->dev, &nhi->dev);
out:
pci_dev_put(nhi);
pci_dev_put(sibling);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
quirk_apple_wait_for_thunderbolt);
#endif
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
ktime_t calltime;
for (; f < end; f++)
if ((f->class == (u32) (dev->class >> f->class_shift) ||
f->class == (u32) PCI_ANY_ID) &&
(f->vendor == dev->vendor ||
f->vendor == (u16) PCI_ANY_ID) &&
(f->device == dev->device ||
f->device == (u16) PCI_ANY_ID)) {
calltime = fixup_debug_start(dev, f->hook);
f->hook(dev);
fixup_debug_report(dev, calltime, f->hook);
}
}
extern struct pci_fixup __start_pci_fixups_early[];
extern struct pci_fixup __end_pci_fixups_early[];
extern struct pci_fixup __start_pci_fixups_header[];
extern struct pci_fixup __end_pci_fixups_header[];
extern struct pci_fixup __start_pci_fixups_final[];
extern struct pci_fixup __end_pci_fixups_final[];
extern struct pci_fixup __start_pci_fixups_enable[];
extern struct pci_fixup __end_pci_fixups_enable[];
extern struct pci_fixup __start_pci_fixups_resume[];
extern struct pci_fixup __end_pci_fixups_resume[];
extern struct pci_fixup __start_pci_fixups_resume_early[];
extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
extern struct pci_fixup __start_pci_fixups_suspend_late[];
extern struct pci_fixup __end_pci_fixups_suspend_late[];
static bool pci_apply_fixup_final_quirks;
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
struct pci_fixup *start, *end;
switch (pass) {
case pci_fixup_early:
start = __start_pci_fixups_early;
end = __end_pci_fixups_early;
break;
case pci_fixup_header:
start = __start_pci_fixups_header;
end = __end_pci_fixups_header;
break;
case pci_fixup_final:
if (!pci_apply_fixup_final_quirks)
return;
start = __start_pci_fixups_final;
end = __end_pci_fixups_final;
break;
case pci_fixup_enable:
start = __start_pci_fixups_enable;
end = __end_pci_fixups_enable;
break;
case pci_fixup_resume:
start = __start_pci_fixups_resume;
end = __end_pci_fixups_resume;
break;
case pci_fixup_resume_early:
start = __start_pci_fixups_resume_early;
end = __end_pci_fixups_resume_early;
break;
case pci_fixup_suspend:
start = __start_pci_fixups_suspend;
end = __end_pci_fixups_suspend;
break;
case pci_fixup_suspend_late:
start = __start_pci_fixups_suspend_late;
end = __end_pci_fixups_suspend_late;
break;
default:
/* stupid compiler warning, you would think with an enum... */
return;
}
pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
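/*
 * Illustrative sketch only (the 0xabcd device ID is made up; this is not a
 * real quirk): a fixup is declared next to its hook with one of the
 * DECLARE_PCI_FIXUP_* macros and runs when pci_fixup_device() is invoked
 * for the matching pass on a matching vendor/device:
 *
 *	static void quirk_example_disable_msi(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0xabcd,
 *				quirk_example_disable_msi);
 */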
static int __init pci_apply_final_quirks(void)
{
struct pci_dev *dev = NULL;
u8 cls = 0;
u8 tmp;
if (pci_cache_line_size)
printk(KERN_DEBUG "PCI: CLS %u bytes\n",
pci_cache_line_size << 2);
pci_apply_fixup_final_quirks = true;
for_each_pci_dev(dev) {
pci_fixup_device(pci_fixup_final, dev);
/*
* If arch hasn't set it explicitly yet, use the CLS
* value shared by all PCI devices. If there's a
* mismatch, fall back to the default value.
*/
if (!pci_cache_line_size) {
pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
if (!cls)
cls = tmp;
if (!tmp || cls == tmp)
continue;
printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
cls << 2, tmp << 2,
pci_dfl_cache_line_size << 2);
pci_cache_line_size = pci_dfl_cache_line_size;
}
}
if (!pci_cache_line_size) {
printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
cls << 2, pci_dfl_cache_line_size << 2);
pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
}
return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
/*
* Following are device-specific reset methods which can be used to
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
* not available.
*/
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
/*
* http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
*
* The 82599 supports FLR on VFs, but FLR support is reported only
* in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
* Therefore, we can't use pcie_flr(), which checks the VF DEVCAP.
*/
if (probe)
return 0;
if (!pci_wait_for_pending_transaction(dev))
dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
return 0;
}
#define SOUTH_CHICKEN2 0xc2004
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define MSG_CTL 0x45010
#define NSDE_PWR_STATE 0xd0100
#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
static int reset_ivb_igd(struct pci_dev *dev, int probe)
{
void __iomem *mmio_base;
unsigned long timeout;
u32 val;
if (probe)
return 0;
mmio_base = pci_iomap(dev, 0, 0);
if (!mmio_base)
return -ENOMEM;
iowrite32(0x00000002, mmio_base + MSG_CTL);
/*
* Clobbering SOUTH_CHICKEN2 register is fine only if the next
 * driver loaded sets the right bits. However, this is a reset and
* the bits have been set by i915 previously, so we clobber
* SOUTH_CHICKEN2 register directly here.
*/
iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);
val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
iowrite32(val, mmio_base + PCH_PP_CONTROL);
timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
do {
val = ioread32(mmio_base + PCH_PP_STATUS);
if ((val & 0xb0000000) == 0)
goto reset_complete;
msleep(10);
} while (time_before(jiffies, timeout));
dev_warn(&dev->dev, "timeout during reset\n");
reset_complete:
iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
pci_iounmap(dev, mmio_base);
return 0;
}
/*
* Device-specific reset method for Chelsio T4-based adapters.
*/
static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
{
u16 old_command;
u16 msix_flags;
/*
* If this isn't a Chelsio T4-based device, return -ENOTTY indicating
* that we have no device-specific reset method.
*/
if ((dev->device & 0xf000) != 0x4000)
return -ENOTTY;
/*
* If this is the "probe" phase, return 0 indicating that we can
* reset this device.
*/
if (probe)
return 0;
/*
* T4 can wedge if there are DMAs in flight within the chip and Bus
* Master has been disabled. We need to have it on till the Function
* Level Reset completes. (BUS_MASTER is disabled in
* pci_reset_function()).
*/
pci_read_config_word(dev, PCI_COMMAND, &old_command);
pci_write_config_word(dev, PCI_COMMAND,
old_command | PCI_COMMAND_MASTER);
/*
* Perform the actual device function reset, saving and restoring
* configuration information around the reset.
*/
pci_save_state(dev);
/*
* T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
* are disabled when an MSI-X interrupt message needs to be delivered.
* So we briefly re-enable MSI-X interrupts for the duration of the
* FLR. The pci_restore_state() below will restore the original
* MSI-X state.
*/
pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
msix_flags |
PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL);
/*
* Start of pcie_flr() code sequence. This reset code is a copy of
* the guts of pcie_flr() because that's not an exported function.
*/
if (!pci_wait_for_pending_transaction(dev))
dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
/*
* End of pcie_flr() code sequence.
*/
/*
* Restore the configuration information (BAR values, etc.) including
* the original PCI Configuration Space Command word, and return
* success.
*/
pci_restore_state(dev);
pci_write_config_word(dev, PCI_COMMAND, old_command);
return 0;
}
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
reset_ivb_igd },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
};
/*
* These device-specific reset methods are here rather than in a driver
* because when a host assigns a device to a guest VM, the host may need
* to reset the device but probably doesn't have a driver for it.
*/
int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
const struct pci_dev_reset_methods *i;
for (i = pci_dev_reset_methods; i->reset; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID))
return i->reset(dev, probe);
}
return -ENOTTY;
}
static void quirk_dma_func0_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 0)
pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}
/*
* https://bugzilla.redhat.com/show_bug.cgi?id=605888
*
* Some Ricoh devices use function 0 as the PCIe requester ID for DMA.
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 1)
pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
}
/*
* Marvell 88SE9123 uses function 1 as the requester ID for DMA. In some
 * SKUs function 1 is present and is a legacy IDE controller; in other
 * SKUs this function is not present, making this a ghost requester.
* https://bugzilla.kernel.org/show_bug.cgi?id=42679
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
quirk_dma_func1_alias);
/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
PCI_DEVICE_ID_JMICRON_JMB388_ESD,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c117 */
DECLARE_PCI_FIXUP_HEADER(0x1c28, /* Lite-On */
0x0122, /* Plextor M6E (Marvell 88SS9183)*/
quirk_dma_func1_alias);
/*
* Some devices DMA with the wrong devfn, not just the wrong function.
* quirk_fixed_dma_alias() uses this table to create fixed aliases, where
* the alias is "fixed" and independent of the device devfn.
*
* For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O
* processor. To software, this appears as a PCIe-to-PCI/X bridge with a
* single device on the secondary bus. In reality, the single exposed
* device at 0e.0 is the Address Translation Unit (ATU) of the controller
* that provides a bridge to the internal bus of the I/O processor. The
* controller supports private devices, which can be hidden from PCI config
* space. In the case of the Adaptec 3405, a private device at 01.0
* appears to be the DMA engine, which therefore needs to become a DMA
* alias for the device.
*/
static const struct pci_device_id fixed_dma_alias_tbl[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
.driver_data = PCI_DEVFN(1, 0) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
PCI_VENDOR_ID_ADAPTEC2, 0x02bc), /* Adaptec 3805 */
.driver_data = PCI_DEVFN(1, 0) },
{ 0 }
};
static void quirk_fixed_dma_alias(struct pci_dev *dev)
{
const struct pci_device_id *id;
id = pci_match_id(fixed_dma_alias_tbl, dev);
if (id)
pci_add_dma_alias(dev, id->driver_data);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
/*
* A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
* using the wrong DMA alias for the device. Some of these devices can be
* used as either forward or reverse bridges, so we need to test whether the
* device is operating in the correct mode. We could probably apply this
 * quirk to PCI_ANY_ID, but for now we'll just use known offenders. The test:
 * if @pdev is a non-root, non-PCIe bridge whose upstream device is PCIe but
 * is not itself a PCIe-to-PCI bridge, then @pdev really is a PCIe-to-PCI bridge.
*/
static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
{
if (!pci_is_root_bus(pdev->bus) &&
pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
!pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
}
/* ASM1083/1085, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
quirk_use_pcie_bridge_dma_alias);
/* Tundra 8113, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c43 */
DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
/* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
/*
* MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to
* be added as aliases to the DMA device in order to allow buffer access
 * when the IOMMU is enabled. The following devfns have to match the RIT-LUT
 * table programmed in the EEPROM.
*/
static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
{
pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
/*
* Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
* class code. Fix it.
*/
static void quirk_tw686x_class(struct pci_dev *pdev)
{
u32 class = pdev->class;
/* Use "Multimedia controller" class */
pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
dev_info(&pdev->dev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
class, pdev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
/*
* Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
* values for the Attribute as were supplied in the header of the
* corresponding Request, except as explicitly allowed when IDO is used."
*
* If a non-compliant device generates a completion with a different
* attribute than the request, the receiver may accept it (which itself
* seems non-compliant based on sec 2.3.2), or it may handle it as a
* Malformed TLP or an Unexpected Completion, which will probably lead to a
* device access timeout.
*
* If the non-compliant device generates completions with zero attributes
* (instead of copying the attributes from the request), we can work around
* this by disabling the "Relaxed Ordering" and "No Snoop" attributes in
* upstream devices so they always generate requests with zero attributes.
*
* This affects other devices under the same Root Port, but since these
* attributes are performance hints, there should be no functional problem.
*
* Note that Configuration Space accesses are never supposed to have TLP
* Attributes, so we're safe waiting till after any Configuration Space
* accesses to do the Root Port fixup.
*/
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
if (!root_port) {
dev_warn(&pdev->dev, "PCIe Completion erratum may cause device errors\n");
return;
}
dev_info(&root_port->dev, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
dev_name(&pdev->dev));
pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_RELAX_EN |
PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
}
/*
* The Chelsio T5 chip fails to copy TLP Attributes from a Request to the
* Completion it generates.
*/
static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
{
/*
* This mask/compare operation selects for Physical Function 4 on a
* T5. We only need to fix up the Root Port once for any of the
* PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
 * 0x54xx, so we use that one.
*/
if ((pdev->device & 0xff00) == 0x5400)
quirk_disable_root_port_attributes(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
quirk_chelsio_T5_disable_root_port_attributes);
/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
* peer-to-peer between functions can claim to support a subset of ACS.
* Such devices effectively enable request redirect (RR) and completion
* redirect (CR) since all transactions are redirected to the upstream
* root complex.
*
* http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
* http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
* http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
*
* 1002:4385 SBx00 SMBus Controller
* 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
* 1002:4383 SBx00 Azalia (Intel HDA)
* 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
* 1002:4384 SBx00 PCI to PCI Bridge
* 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
*
* https://bugzilla.kernel.org/show_bug.cgi?id=81841#c15
*
* 1022:780f [AMD] FCH PCI Bridge
* 1022:7809 [AMD] FCH USB OHCI Controller
*/
static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
{
#ifdef CONFIG_ACPI
struct acpi_table_header *header = NULL;
acpi_status status;
/* Targeting multifunction devices on the SB (appears on root bus) */
if (!dev->multifunction || !pci_is_root_bus(dev->bus))
return -ENODEV;
/* The IVRS table describes the AMD IOMMU */
status = acpi_get_table("IVRS", 0, &header);
if (ACPI_FAILURE(status))
return -ENODEV;
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
#else
return -ENODEV;
#endif
}
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
* Cavium devices matching this quirk do not perform peer-to-peer
* with other functions, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
return acs_flags ? 0 : 1;
}
/*
* Many Intel PCH root ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
* into that category as provided by Intel in Red Hat bugzilla 1037684.
*/
static const u16 pci_quirk_intel_pch_acs_ids[] = {
/* Ibexpeak PCH */
0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
/* Cougarpoint PCH */
0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
/* Pantherpoint PCH */
0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
/* Lynxpoint-H PCH */
0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
/* Lynxpoint-LP PCH */
0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
/* Wildcat PCH */
0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
/* Patsburg (X79) PCH */
0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
/* Wellsburg (X99) PCH */
0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
/* Lynx Point (9 series) PCH */
0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
};
static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
{
int i;
/* Filter out a few obvious non-matches first */
if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
return false;
for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
return true;
return false;
}
#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
INTEL_PCH_ACS_FLAGS : 0;
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
return acs_flags & ~flags ? 0 : 1;
}
/*
* Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
* the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
* 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and
* control registers whereas the PCIe spec packs them into words (Rev 3.0,
* 7.16 ACS Extended Capability). The bit definitions are correct, but the
* control register is at offset 8 instead of 6 and we should probably use
* dword accesses to them. This applies to the following PCI Device IDs, as
* found in volume 1 of the datasheet[2]:
*
* 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16}
* 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20}
*
* N.B. This doesn't fix what lspci shows.
*
* [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
* [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
*/
static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
{
return pci_is_pcie(dev) &&
pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
((dev->device & ~0xf) == 0xa110 ||
(dev->device >= 0xa167 && dev->device <= 0xa16a));
}
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
int pos;
u32 cap, ctrl;
if (!pci_quirk_intel_spt_pch_acs_match(dev))
return -ENOTTY;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
return -ENOTTY;
/* see pci_acs_flags_enabled() */
pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
acs_flags &= (cap | PCI_ACS_EC);
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
return acs_flags & ~ctrl ? 0 : 1;
}
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
* SV, TB, and UF are not relevant to multifunction endpoints.
*
* Multifunction devices are only required to implement RR, CR, and DT
* in their ACS capability if they support peer-to-peer transactions.
* Devices matching this quirk have been verified by the vendor to not
* perform peer-to-peer with other functions, allowing us to mask out
* these bits as if they were unimplemented in the ACS capability.
*/
acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
return acs_flags ? 0 : 1;
}
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
/* 82580 */
{ PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },
/* 82576 */
{ PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },
/* 82575 */
{ PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },
/* I350 */
{ PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },
/* 82571 (Quads omitted due to non-ACS switch) */
{ PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
/* I219 */
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
{ 0 }
};
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
int ret;
/*
* Allow devices that do not expose standard PCIe ACS capabilities
* or control to indicate their support here. Multi-function express
* devices which do not allow internal peer-to-peer between functions,
* but do not implement PCIe ACS may wish to return true here.
*/
for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID)) {
ret = i->acs_enabled(dev, acs_flags);
if (ret >= 0)
return ret;
}
}
return -ENOTTY;
}
/* Config space offset of Root Complex Base Address register */
#define INTEL_LPC_RCBA_REG 0xf0
/* 31:14 RCBA address */
#define INTEL_LPC_RCBA_MASK 0xffffc000
/* RCBA Enable */
#define INTEL_LPC_RCBA_ENABLE (1 << 0)
/* Backbone Scratch Pad Register */
#define INTEL_BSPR_REG 0x1104
/* Backbone Peer Non-Posted Disable */
#define INTEL_BSPR_REG_BPNPD (1 << 8)
/* Backbone Peer Posted Disable */
#define INTEL_BSPR_REG_BPPD (1 << 9)
/* Upstream Peer Decode Configuration Register */
#define INTEL_UPDCR_REG 0x1114
/* 5:0 Peer Decode Enable bits */
#define INTEL_UPDCR_REG_MASK 0x3f
static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
{
u32 rcba, bspr, updcr;
void __iomem *rcba_mem;
/*
* Read the RCBA register from the LPC (D31:F0). PCH root ports
* are D28:F* and therefore get probed before LPC, thus we can't
* use pci_get_slot/pci_read_config_dword here.
*/
pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
INTEL_LPC_RCBA_REG, &rcba);
if (!(rcba & INTEL_LPC_RCBA_ENABLE))
return -EINVAL;
rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
PAGE_ALIGN(INTEL_UPDCR_REG));
if (!rcba_mem)
return -ENOMEM;
/*
* The BSPR can disallow peer cycles, but it's set by soft strap and
* therefore read-only. If both posted and non-posted peer cycles are
 * disallowed, we're ok. If either is allowed, then we need to use
* the UPDCR to disable peer decodes for each port. This provides the
* PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF
*/
bspr = readl(rcba_mem + INTEL_BSPR_REG);
bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
updcr = readl(rcba_mem + INTEL_UPDCR_REG);
if (updcr & INTEL_UPDCR_REG_MASK) {
dev_info(&dev->dev, "Disabling UPDCR peer decodes\n");
updcr &= ~INTEL_UPDCR_REG_MASK;
writel(updcr, rcba_mem + INTEL_UPDCR_REG);
}
}
iounmap(rcba_mem);
return 0;
}
/* Miscellaneous Port Configuration register */
#define INTEL_MPC_REG 0xd8
/* MPC: Invalid Receive Bus Number Check Enable */
#define INTEL_MPC_REG_IRBNCE (1 << 26)
static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
{
u32 mpc;
/*
* When enabled, the IRBNCE bit of the MPC register enables the
* equivalent of PCI ACS Source Validation (PCI_ACS_SV), which
* ensures that requester IDs fall within the bus number range
* of the bridge. Enable if not already.
*/
pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
dev_info(&dev->dev, "Enabling MPC IRBNCE\n");
mpc |= INTEL_MPC_REG_IRBNCE;
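/* IRBNCE is bit 26, so the full dword must be written back; a 16-bit write would drop it */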
pci_write_config_dword(dev, INTEL_MPC_REG, mpc);
}
}
static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
{
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
if (pci_quirk_enable_intel_lpc_acs(dev)) {
dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n");
return 0;
}
pci_quirk_enable_intel_rp_mpc_acs(dev);
dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n");
return 0;
}
static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
{
int pos;
u32 cap, ctrl;
if (!pci_quirk_intel_spt_pch_acs_match(dev))
return -ENOTTY;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
return -ENOTTY;
pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
ctrl |= (cap & PCI_ACS_SV);
ctrl |= (cap & PCI_ACS_RR);
ctrl |= (cap & PCI_ACS_CR);
ctrl |= (cap & PCI_ACS_UF);
pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n");
return 0;
}
static const struct pci_dev_enable_acs {
u16 vendor;
u16 device;
int (*enable_acs)(struct pci_dev *dev);
} pci_dev_enable_acs[] = {
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
{ 0 }
};
int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
const struct pci_dev_enable_acs *i;
int ret;
for (i = pci_dev_enable_acs; i->enable_acs; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID)) {
ret = i->enable_acs(dev);
if (ret >= 0)
return ret;
}
}
return -ENOTTY;
}
/*
* The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with
* QuickAssist Technology (QAT) is prematurely terminated in hardware. The
* Next Capability pointer in the MSI Capability Structure should point to
* the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
* the list.
*/
static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
{
int pos, i = 0;
u8 next_cap;
u16 reg16, *cap;
struct pci_cap_saved_state *state;
/* Bail if the hardware bug is fixed */
if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
return;
/* Bail if MSI Capability Structure is not found for some reason */
pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
if (!pos)
return;
/*
* Bail if Next Capability pointer in the MSI Capability Structure
* is not the expected incorrect 0x00.
*/
pci_read_config_byte(pdev, pos + 1, &next_cap);
if (next_cap)
return;
/*
* PCIe Capability Structure is expected to be at 0x50 and should
* terminate the list (Next Capability pointer is 0x00). Verify
 * Capability ID and Next Capability pointer are as expected.
* Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext()
* to correctly set kernel data structures which have already been
* set incorrectly due to the hardware bug.
*/
pos = 0x50;
pci_read_config_word(pdev, pos, ®16);
if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
u32 status;
#ifndef PCI_EXP_SAVE_REGS
#define PCI_EXP_SAVE_REGS 7
#endif
int size = PCI_EXP_SAVE_REGS * sizeof(u16);
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16);
pdev->pcie_flags_reg = reg16;
pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16);
pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
pdev->cfg_size = PCI_CFG_SPACE_SIZE;
if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
return;
/*
* Save PCIE cap
*/
state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
if (!state)
return;
state->cap.cap_nr = PCI_CAP_ID_EXP;
state->cap.cap_extended = 0;
state->cap.size = size;
cap = (u16 *)&state->cap.data[0];
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
hlist_add_head(&state->next, &pdev->saved_cap_space);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
/*
* VMD-enabled root ports will change the source ID for all messages
* to the VMD device. Rather than doing device matching with the source
* ID, the AER driver should traverse the child device tree, reading
* AER registers to find the faulting device.
*/
static void quirk_no_aersid(struct pci_dev *pdev)
{
/* VMD Domain */
if (pdev->bus->sysdata && pci_domain_nr(pdev->bus) >= 0x10000)
pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
| aospan/linux-next-bcm4708-edgecore-ecw7220-l | drivers/pci/quirks.c | C | gpl-2.0 | 164,319 |
/*
* Read flash partition table from command line
*
* Copyright © 2002 SYSGO Real-Time Solutions GmbH
* Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* The format for the command line is as follows:
*
 * mtdparts=<mtddef>[;<mtddef>]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
* where <mtd-id> is the name from the "cat /proc/mtd" command
* <partdef> := <size>[@offset][<name>][ro][lk]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
* <name> := '(' NAME ')'
*
* Examples:
*
* 1 NOR Flash, with 1 single writable partition:
* edb7312-nor:-
*
 * 1 NOR Flash with 2 partitions, 1 NAND Flash with one partition:
* edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
*/
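/*
 * A further, purely illustrative example (hypothetical mtd-id and sizes)
 * exercising the @offset, "ro" and "lk" options from the grammar above:
 *
 * physmap-flash.0:512k(bootloader)ro,64k@0x80000(env)lk,-(rootfs)
 */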
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
/* error message prefix */
#define ERRP "mtd: "
/* debug macro */
#if 0
#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
#else
#define dbg(x)
#endif
/* special size referring to all the remaining space in a partition */
#define SIZE_REMAINING UINT_MAX
#define OFFSET_CONTINUOUS UINT_MAX
struct cmdline_mtd_partition {
struct cmdline_mtd_partition *next;
char *mtd_id;
int num_parts;
struct mtd_partition *parts;
};
/* mtdpart_setup() parses into here */
static struct cmdline_mtd_partition *partitions;
/* the command line passed to mtdpart_setup() */
static char *cmdline;
static int cmdline_parsed = 0;
/*
* Parse one partition definition for an MTD. Since there can be many
* comma separated partition definitions, this function calls itself
* recursively until no more partition definitions are found. Nice side
* effect: the memory to keep the mtd_partition structs and the names
* is allocated upon the last definition being found. At that point the
* syntax has been verified ok.
*/
static struct mtd_partition * newpart(char *s,
char **retptr,
int *num_parts,
int this_part,
unsigned char **extra_mem_ptr,
int extra_mem_size)
{
struct mtd_partition *parts;
unsigned long size;
unsigned long offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
char delim;
unsigned int mask_flags;
/* fetch the partition size */
if (*s == '-')
{ /* assign all remaining space to this partition */
size = SIZE_REMAINING;
s++;
}
else
{
size = memparse(s, &s);
if (size < PAGE_SIZE)
{
printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
return NULL;
}
}
/* fetch partition name and flags */
mask_flags = 0; /* this is going to be a regular partition */
delim = 0;
/* check for offset */
if (*s == '@')
{
s++;
offset = memparse(s, &s);
}
/* now look for name */
if (*s == '(')
{
delim = ')';
}
if (delim)
{
char *p;
name = ++s;
p = strchr(name, delim);
if (!p)
{
printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
return NULL;
}
name_len = p - name;
s = p + 1;
}
else
{
name = NULL;
name_len = 13; /* Partition_000 */
}
/* record name length for memory allocation later */
extra_mem_size += name_len + 1;
/* test for options */
if (strncmp(s, "ro", 2) == 0)
{
mask_flags |= MTD_WRITEABLE;
s += 2;
}
/* if lk is found do NOT unlock the MTD partition*/
if (strncmp(s, "lk", 2) == 0)
{
mask_flags |= MTD_POWERUP_LOCK;
s += 2;
}
/* test if more partitions are following */
if (*s == ',')
{
if (size == SIZE_REMAINING)
{
printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
return NULL;
}
/* more partitions follow, parse them */
parts = newpart(s + 1, &s, num_parts, this_part + 1,
&extra_mem, extra_mem_size);
if (!parts)
return NULL;
}
else
{ /* this is the last partition: allocate space for all */
int alloc_size;
*num_parts = this_part + 1;
alloc_size = *num_parts * sizeof(struct mtd_partition) +
extra_mem_size;
parts = kzalloc(alloc_size, GFP_KERNEL);
if (!parts)
{
printk(KERN_ERR ERRP "out of memory\n");
return NULL;
}
extra_mem = (unsigned char *)(parts + *num_parts);
}
/* enter this partition (offset will be calculated later if it is zero at this point) */
parts[this_part].size = size;
parts[this_part].offset = offset;
parts[this_part].mask_flags = mask_flags;
if (name)
{
strlcpy((char *)extra_mem, name, name_len + 1);
}
else
{
sprintf((char *)extra_mem, "Partition_%03d", this_part);
}
parts[this_part].name = (char *)extra_mem;
extra_mem += name_len + 1;
dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part,
parts[this_part].name,
parts[this_part].offset,
parts[this_part].size,
parts[this_part].mask_flags));
/* return (updated) pointer to extra_mem memory */
if (extra_mem_ptr)
*extra_mem_ptr = extra_mem;
/* return (updated) pointer command line string */
*retptr = s;
/* return partition table */
return parts;
}
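/*
 * Worked example (informal): for "256k(ARMboot)ro,-(root)" the first call
 * parses "256k(ARMboot)ro", sees the ',' and recurses on "-(root)"; the
 * innermost call allocates room for both mtd_partition entries plus the
 * name strings, and each level then fills in its own slot while unwinding.
 */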
/*
* Parse the command line.
*/
static int mtdpart_setup_real(char *s)
{
cmdline_parsed = 1;
for( ; s != NULL; )
{
struct cmdline_mtd_partition *this_mtd;
struct mtd_partition *parts;
int mtd_id_len;
int num_parts;
char *p, *mtd_id;
mtd_id = s;
/* fetch <mtd-id> */
if (!(p = strchr(s, ':')))
{
printk(KERN_ERR ERRP "no mtd-id\n");
return 0;
}
mtd_id_len = p - mtd_id;
dbg(("parsing <%s>\n", p+1));
/*
* parse one mtd. have it reserve memory for the
* struct cmdline_mtd_partition and the mtd-id string.
*/
parts = newpart(p + 1, /* cmdline */
&s, /* out: updated cmdline ptr */
&num_parts, /* out: number of parts */
0, /* first partition */
(unsigned char**)&this_mtd, /* out: extra mem */
mtd_id_len + 1 + sizeof(*this_mtd) +
sizeof(void*)-1 /*alignment*/);
if(!parts)
{
/*
* An error occurred. We're either:
* a) out of memory, or
* b) in the middle of the partition spec
* Either way, this mtd is hosed and we're
* unlikely to succeed in parsing any more
*/
return 0;
}
/* align this_mtd */
this_mtd = (struct cmdline_mtd_partition *)
ALIGN((unsigned long)this_mtd, sizeof(void*));
/* enter results */
this_mtd->parts = parts;
this_mtd->num_parts = num_parts;
this_mtd->mtd_id = (char*)(this_mtd + 1);
strlcpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
/* link into chain */
this_mtd->next = partitions;
partitions = this_mtd;
dbg(("mtdid=<%s> num_parts=<%d>\n",
this_mtd->mtd_id, this_mtd->num_parts));
/* EOS - we're done */
if (*s == 0)
break;
/* does another spec follow? */
if (*s != ';')
{
printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
return 0;
}
s++;
}
return 1;
}
/*
* Main function to be called from the MTD mapping driver/device to
* obtain the partitioning information. At this point the command line
 * arguments will actually be parsed and turned into struct mtd_partition
* information. It returns partitions for the requested mtd device, or
* the first one in the chain if a NULL mtd_id is passed in.
*/
static int parse_cmdline_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
unsigned long origin)
{
unsigned long offset;
int i;
struct cmdline_mtd_partition *part;
const char *mtd_id = master->name;
/* parse command line */
if (!cmdline_parsed)
mtdpart_setup_real(cmdline);
for(part = partitions; part; part = part->next)
{
if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
{
for(i = 0, offset = 0; i < part->num_parts; i++)
{
if (part->parts[i].offset == OFFSET_CONTINUOUS)
part->parts[i].offset = offset;
else
offset = part->parts[i].offset;
if (part->parts[i].size == SIZE_REMAINING)
part->parts[i].size = master->size - offset;
if (offset + part->parts[i].size > master->size)
{
printk(KERN_WARNING ERRP
"%s: partitioning exceeds flash size, truncating\n",
part->mtd_id);
part->parts[i].size = master->size - offset;
part->num_parts = i;
}
offset += part->parts[i].size;
}
*pparts = kmemdup(part->parts,
sizeof(*part->parts) * part->num_parts,
GFP_KERNEL);
if (!*pparts)
return -ENOMEM;
return part->num_parts;
}
}
return 0;
}
/*
* This is the handler for our kernel parameter, called from
* main.c::checksetup(). Note that we can not yet kmalloc() anything,
* so we only save the commandline for later processing.
*
* This function needs to be visible for bootloaders.
*/
static int mtdpart_setup(char *s)
{
cmdline = s;
return 1;
}
__setup("mtdparts=", mtdpart_setup);
static struct mtd_part_parser cmdline_parser = {
.owner = THIS_MODULE,
.parse_fn = parse_cmdline_partitions,
.name = "cmdlinepart",
};
static int __init cmdline_parser_init(void)
{
return register_mtd_parser(&cmdline_parser);
}
module_init(cmdline_parser_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
MODULE_DESCRIPTION("Command line configuration of MTD partitions");
| de-wolff/android_kernel_motorola_xt320 | drivers/mtd/cmdlinepart.c | C | gpl-2.0 | 10,218 |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Test that the file is open for read access when the application specifies
* the value of O_RDWR.
*
* Steps:
* 1. Create a shared memory object using shm_open().
* 2. Write into the file referenced by the file descriptor returned by
* shm_open().
* 3. Reopen the file with shm_open() and O_RDWR set.
* 4. Read from the file.
* The test passes if it reads back what was previously written.
*/
/* ftruncate was formerly an XOPEN extension. We define _XOPEN_SOURCE here to
avoid a warning if the implementation does not provide ftruncate as a base
interface. */
#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include "posixtest.h"
#define BUF_SIZE 8
#define SHM_NAME "posixtest_14-2"
int main() {
int fd;
char str[BUF_SIZE] = "qwerty";
char *buf;
fd = shm_open(SHM_NAME, O_RDWR|O_CREAT, S_IRUSR|S_IWUSR);
if (fd == -1) {
perror("An error occurs when calling shm_open()");
return PTS_UNRESOLVED;
}
if (ftruncate(fd, BUF_SIZE) != 0) {
perror("An error occurs when calling ftruncate()");
return PTS_UNRESOLVED;
}
buf = mmap(NULL, BUF_SIZE, PROT_WRITE, MAP_SHARED, fd, 0);
if (buf == MAP_FAILED) {
perror("An error occurs when calling mmap()");
return PTS_UNRESOLVED;
}
strcpy(buf, str);
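/* Step 3: reopen the shared memory object with O_RDWR and map it for reading. */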
fd = shm_open(SHM_NAME, O_RDWR, S_IRUSR|S_IWUSR);
if (fd == -1) {
perror("An error occurs when calling shm_open()");
return PTS_UNRESOLVED;
}
buf = mmap(NULL, BUF_SIZE, PROT_READ, MAP_SHARED, fd, 0);
if (buf == MAP_FAILED) {
perror("An error occurs when calling mmap()");
return PTS_UNRESOLVED;
}
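/* The name is no longer needed once the object is mapped; unlink it before comparing. */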
shm_unlink(SHM_NAME);
if (strcmp(buf, str) == 0) {
printf("Test PASSED\n");
return PTS_PASS;
}
printf("Test FAILED\n");
return PTS_FAIL;
} | anthony-kolesov/arc_ltp | testcases/open_posix_testsuite/conformance/interfaces/shm_open/14-2.c | C | gpl-2.0 | 2,192 |
/* OS134, Copyright (C) 2005, Benjamin Stein, Jaap Weel, Ting Liao --
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version. This program is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details. You
should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation,
Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
* Loader. loads an a.out into the memory.
*/
/* The starting address for the text (code) section - 64K of code. */
#define I_ADDRESS 0x600000
/* The starting address for the data section - 64K of data. */
#define D_ADDRESS 0x800000
typedef unsigned short uint16_t;
typedef unsigned long uint32_t;
typedef struct {
uint16_t magic __PACKED__; /* Magic number. */
uint16_t version __PACKED__; /* Version number. */
uint32_t code_size __PACKED__; /* Text segment size. */
uint32_t data_size __PACKED__; /* Initialized data size. */
uint32_t bss_size __PACKED__; /* Uninitialized data size. */
uint32_t syms_size __PACKED__;
uint32_t entry __PACKED__;
uint32_t code_offset __PACKED__;
uint32_t data_offset __PACKED__;
} aout_head_t; /* 28 bytes */
/*
 * Since the OS does not have file I/O code yet, we need an image
 * pointer that points to the file. I have not figured out a way to do
 * that yet; one reason is that we have no file system in the OS yet.
 */
/*
* Entry is the entry point of the program.
*/
int load_aout(char *filename, unsigned char *image, unsigned *entry)
{
/*
* Load the a.out format file filename.
*
* Read the text segment from the file to I_ADDRESS.
*
* Read the data segment from the file to D_ADDRESS. Zero out the BSS segment.
*
* Create and map in a stack segment (usually separate from the data
* segment, since the data heap and stack grow separately.) Place
* arguments from the command line or calling program on the stack.
*
* Set registers appropriately and jump to the starting address.
*/
aout_head_t *aout;
unsigned char *dst;
uint32_t i;
/* Validate headers. */
aout = (aout_head_t *) image;
image += sizeof(aout_head_t); /* Move to the code section. */
/* Get entry point. */
(*entry) = aout->entry;
/* Load text to I_ADDRESS: copy aout->code_size bytes of code to
 * I_ADDRESS. The code is assumed to start right after the header,
 * which is where the running image pointer now points (equivalently,
 * image_base + aout->code_offset). */
dst = (unsigned char *) I_ADDRESS;
for (i = 0; i < aout->code_size; i++)
    dst[i] = image[i];
image += aout->code_size;
/* Load DATA to D_ADDRESS: copy aout->data_size bytes of initialized
 * data, assumed to follow the text section, to D_ADDRESS. */
dst = (unsigned char *) D_ADDRESS;
for (i = 0; i < aout->data_size; i++)
    dst[i] = image[i];
image += aout->data_size;
/* Set uninitialized data to 0: zero bss_size bytes starting at
 * D_ADDRESS + aout->data_size. */
for (i = 0; i < aout->bss_size; i++)
    dst[aout->data_size + i] = 0;
return 0;
}
| jaapweel/os134 | loader.c | C | gpl-2.0 | 3,002 |
/***********************************************
* Author: Alexander Oro Acebo
* Date: 01/22/2015
* Description: Change current dir
* ********************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/shm.h>
#include "../include/RootDir.h"
#include "../include/fatSupport.h"
#include "../include/Shared_Info.h"
#define BYTES_TO_READ_IN_BOOT_SECTOR 512
#define BLUE "\x1B[1;36m" // for listing directories as blue
#define RESET "\033[0m"
#define BOLD "\033[1m"
FILE* FILE_SYSTEM_ID;
int BYTES_PER_SECTOR;
char* NEW_DIR;
Shared_Info *CUR_DIR;
void usage();
char* readFile(char*, char*[]);
char* readFileNonRoot(char*);
void getSharedMem(int, key_t, char*, char*);
void createSharedMem(int, key_t, char*, char*);
char* readFAT12Table(char*);
int main(int argc, char *argv[])
{
int fd[2], nbytes;
int i = 0, j = 0;
char *buffer;
int shmid;
key_t key;
char *shm, *s, *pch, tmp[9], *tmp_cur;
CUR_DIR = malloc(sizeof(Shared_Info));
// Read shared memory from terminal
getSharedMem(shmid, key, shm, s);
char *trunk[CUR_DIR->FLC];
if (argc != 2) {
usage();
return 1;
}
NEW_DIR = malloc(strlen(argv[argc - 1]) + 1);
strcpy(NEW_DIR, argv[argc - 1]);
FILE_SYSTEM_ID = fopen(CUR_DIR->img, "r+");
if (FILE_SYSTEM_ID == NULL)
{
fprintf(stderr, "Could not open the floppy drive or image.\n");
exit(1);
}
// Set it to this only to read the boot sector
BYTES_PER_SECTOR = BYTES_TO_READ_IN_BOOT_SECTOR;
buffer = (char*) malloc(32 * sizeof(char));
if (strcmp(NEW_DIR, "..") != 0) {
tmp_cur = malloc(sizeof(CUR_DIR->path));
strcpy(tmp_cur, CUR_DIR->path);
i = 0;
pch = strtok(tmp_cur, " /");
while (pch != NULL) {
trunk[i] = pch;
pch = strtok(NULL, " /");
i++;
}
if (strcmp(CUR_DIR->path, "/") == 0)
buffer = readFile(buffer, trunk); // Read directory sectors into buffer
else
buffer = readFileNonRoot(buffer);
} else {
tmp_cur = malloc(sizeof(CUR_DIR->path));
strcpy(tmp_cur, CUR_DIR->path);
pch = strtok(tmp_cur, " /");
while (pch != NULL) {
strcpy(tmp, pch);
pch = strtok(NULL, " /");
}
strcpy(tmp_cur, CUR_DIR->path);
strcpy(CUR_DIR->path, "/");
pch = strtok(tmp_cur, " /");
while (strcmp(pch, tmp) != 0) {
strcat(CUR_DIR->path, pch);
strcat(CUR_DIR->path, "/");
pch = strtok(NULL, " /");
}
CUR_DIR->FLC = CUR_DIR->OLD_FLC;
}
// Free data
free(buffer);
buffer = NULL;
fclose(FILE_SYSTEM_ID);
// Push new shared data to terminal
createSharedMem(shmid, key, shm, s);
exit(EXIT_SUCCESS);
}
char* readFAT12Table(char* buffer) {
int i = 0;
// Read in FAT table to buffer
for (i = 1; i <= 9; i++) {
if (read_sector(i, (buffer + BYTES_PER_SECTOR * (i - 1))) == -1) {
fprintf(stderr, "Something has gone wrong -- could not read the sector\n");
}
}
return buffer;
}
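// Attach to the shared segment used by the shell and publish the updated Shared_Info state.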
void createSharedMem(int shmid, key_t key, char * shm, char * s) {
key = 5678;
int i = 0;
Shared_Info* tmp;
if ((shmid = shmget(key, sizeof(Shared_Info*), IPC_CREAT | 0666)) < 0) {
perror("shmget");
exit(1);
}
if ((tmp = shmat(shmid, NULL, 0)) == (Shared_Info*) -1) {
perror("shmat");
exit(1);
}
// Copy the current directory state into the shared segment.
*tmp = *CUR_DIR;
//s = shm;
// Write current path to shared memory
//for (i = 0; i < sizeof(CUR_DIR->path); i++)
// *s++ = CUR_DIR->path[i];
//*s = NULL;
}
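// Attach to the shared segment published by the shell and adopt its Shared_Info state.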
void getSharedMem(int shmid, key_t key, char * shm, char * s) {
key = 5678;
int i = 0;
Shared_Info* tmp;
if ((shmid = shmget(key, sizeof(Shared_Info*), 0666)) < 0) {
perror("shmget");
exit(1);
}
if ((tmp = shmat(shmid, NULL, 0)) == (Shared_Info*) -1) {
perror("shmat");
exit(1);
}
CUR_DIR = tmp;
//i = 0;
//for(s = shm; *s != NULL; s++) {
// CUR_DIR->path[i] = *s;
// i++;
//}
}
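// Walk the FAT chain of the current (non-root) directory, scanning each
// 32-byte entry for a subdirectory named NEW_DIR; on a match, append it to
// CUR_DIR->path and point CUR_DIR->FLC at the subdirectory's first sector.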
char* readFileNonRoot(char* buffer) {
int fat, start_sector = CUR_DIR->FLC, i = 0, j = 0, attrib, tmp1, tmp2;
// Allocate memory
char *sector = (char*) malloc(BYTES_PER_SECTOR * sizeof(char));
char *fat_buffer = (char*) malloc(9 * BYTES_PER_SECTOR * sizeof(char));
char tmp[9];
fat_buffer = readFAT12Table(fat_buffer); // Read fat table into buffer
while (1) {
if (read_sector(start_sector, sector) == -1) { // Read data sector
fprintf(stderr, "Something has gone wrong -- could not read the boot sector\n");
}
i = 0;
j = 0;
while (i < 512) {
for (j = 0; j < 32; j++)
buffer[j] = sector[j + i];
attrib = ( (int) buffer[11] );
// if is subdir
if (attrib == 16) {
// read subdir name into tmp
for (j = 0; j < 8; j++)
tmp[j] = buffer[j];
tmp[strlen(NEW_DIR)] = '\0';
NEW_DIR[strlen(NEW_DIR)] = '\0';
// compare
if (strcmp(NEW_DIR, tmp) == 0) {
strcat(CUR_DIR->path, NEW_DIR);
strcat(CUR_DIR->path, "/");
tmp1 = ( ( (int) buffer[27] ) << 8 ) & 0x0000ff00;
tmp2 = ( (int) buffer[26] ) & 0x000000ff;
CUR_DIR->FLC = tmp1 | tmp2;
CUR_DIR->FLC += 33 - 2;
break;
}
}
i += 32;
}
//printf("\n");
//printf("%s\n", sector); // Display data sector contents
fat = get_fat_entry(start_sector - 33 + 2, fat_buffer); // Get fat entrie from table buffer
if (fat >= 4088 && fat <= 4095) { // Last cluster in file
break;
} else if (fat >= 4080 && fat <= 4086) { // Reserved cluster
break;
} else if (fat == 0 || fat == 4087) { // Unused OR Bad cluster
break;
} else { // Next cluster in file
start_sector = fat + 33 - 2;
}
}
// Free memory
free(sector);
free(fat_buffer);
sector = NULL;
fat_buffer = NULL;
return buffer;
}
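// Scan the root directory (starting at sector CUR_DIR->FLC) for a
// subdirectory named NEW_DIR; on a match, append it to CUR_DIR->path and
// point CUR_DIR->FLC at the subdirectory's first sector.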
char* readFile(char* buffer, char* trunk[]) {
int i = 0, j = 0, bytes_read, attrib, tmp1, tmp2;
char tmp[9];
fpos_t position;
i = CUR_DIR->FLC * 512;
fseek(FILE_SYSTEM_ID, i, SEEK_SET); // Set pos to beginning file
while (1) {
bytes_read = fread(buffer, sizeof(char), 32, FILE_SYSTEM_ID); // read 32 bytes at a time into buffer
attrib = ( (int) buffer[11] );
// if is subdir
if (attrib == 16) {
// read subdir name into tmp
for (j = 0; j < 8; j++)
tmp[j] = buffer[j];
tmp[strlen(NEW_DIR)] = '\0';
NEW_DIR[strlen(NEW_DIR)] = '\0';
// compare
if (strcmp(NEW_DIR, tmp) == 0) {
strcat(CUR_DIR->path, NEW_DIR);
strcat(CUR_DIR->path, "/");
tmp1 = ( ( (int) buffer[27] ) << 8 ) & 0x0000ff00;
tmp2 = ( (int) buffer[26] ) & 0x000000ff;
CUR_DIR->FLC = tmp1 | tmp2;
CUR_DIR->FLC += 33 - 2;
break;
}
}
if (i >= 32 * 512) { // If i has incrimented to the end of the root dir break
fprintf(stderr, "ERROR: directory '%s' does not exist\n", NEW_DIR);
break;
}
i = i + 32;
}
return buffer;
}
void usage() {
printf("\nusage: cd (/path/to/SUBDIR)\n\n");
}
| pappacurds/csi385-FAT12-OS | package/cd/cd.c | C | gpl-2.0 | 7,028 |
/*
* Copyright (C) 2010 Xavier Claessens <xclaesse@gmail.com>
* Copyright (C) 2010 Collabora Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <stdlib.h>
#include <gio/gio.h>
#include <telepathy-glib/telepathy-glib.h>
static GMainLoop *loop = NULL;
static GList *channel_list = NULL;
static void
channel_invalidated_cb (TpChannel *channel,
guint domain,
gint code,
gchar *message,
gpointer user_data)
{
channel_list = g_list_remove (channel_list, channel);
g_object_unref (channel);
if (channel_list == NULL)
g_main_loop_quit (loop);
}
static void
session_complete (TpChannel *channel, const GError *error)
{
if (error != NULL)
{
g_debug ("Error for channel %p: %s", channel,
error ? error->message : "No error message");
}
tp_channel_close_async (channel, NULL, NULL);
}
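/* Completion callback for g_io_stream_splice_async(): the tube and sshd
 * streams are done, so close the channel. */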
static void
splice_cb (GObject *source_object,
GAsyncResult *res,
gpointer channel)
{
GError *error = NULL;
g_io_stream_splice_finish (res, &error);
session_complete (channel, error);
g_clear_error (&error);
}
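/* The remote contact offered a stream tube: accept it and bridge the tube
 * connection to the local sshd on 127.0.0.1 port 22. */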
static void
accept_tube_cb (GObject *object,
GAsyncResult *res,
gpointer user_data)
{
TpChannel *channel = TP_CHANNEL (object);
TpStreamTubeConnection *stc;
GInetAddress *inet_address = NULL;
GSocketAddress *socket_address = NULL;
GSocket *socket = NULL;
GSocketConnection *tube_connection = NULL;
GSocketConnection *sshd_connection = NULL;
GError *error = NULL;
stc = tp_stream_tube_channel_accept_finish (TP_STREAM_TUBE_CHANNEL (channel),
res, &error);
if (stc == NULL)
goto OUT;
tube_connection = tp_stream_tube_connection_get_socket_connection (stc);
/* Connect to the sshd */
inet_address = g_inet_address_new_loopback (G_SOCKET_FAMILY_IPV4);
socket_address = g_inet_socket_address_new (inet_address, 22);
socket = g_socket_new (G_SOCKET_FAMILY_IPV4, G_SOCKET_TYPE_STREAM,
G_SOCKET_PROTOCOL_DEFAULT, &error);
if (socket == NULL)
goto OUT;
if (!g_socket_connect (socket, socket_address, NULL, &error))
goto OUT;
sshd_connection = g_socket_connection_factory_create_connection (socket);
/* Splice tube and ssh connections */
g_io_stream_splice_async (G_IO_STREAM (tube_connection),
G_IO_STREAM (sshd_connection), G_IO_STREAM_SPLICE_NONE,
G_PRIORITY_DEFAULT, NULL, splice_cb, channel);
OUT:
if (error != NULL)
session_complete (channel, error);
tp_clear_object (&stc);
tp_clear_object (&inet_address);
tp_clear_object (&socket_address);
tp_clear_object (&socket);
tp_clear_object (&sshd_connection);
g_clear_error (&error);
}
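/* Channel handler: accept every incoming stream tube channel we are given
 * and track it until it is invalidated. */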
static void
got_channel_cb (TpSimpleHandler *handler,
TpAccount *account,
TpConnection *connection,
GList *channels,
GList *requests_satisfied,
gint64 user_action_time,
TpHandleChannelsContext *context,
gpointer user_data)
{
GList *l;
for (l = channels; l != NULL; l = l->next)
{
if (TP_IS_STREAM_TUBE_CHANNEL (l->data))
{
TpStreamTubeChannel *channel = l->data;
channel_list = g_list_prepend (channel_list, g_object_ref (channel));
g_signal_connect (channel, "invalidated",
G_CALLBACK (channel_invalidated_cb), NULL);
tp_stream_tube_channel_accept_async (channel, accept_tube_cb, NULL);
}
}
tp_handle_channels_context_accept (context);
}
int
main (gint argc, gchar *argv[])
{
TpDBusDaemon *dbus = NULL;
TpSimpleClientFactory *factory = NULL;
TpBaseClient *client = NULL;
gboolean success = TRUE;
GError *error = NULL;
g_type_init ();
tp_debug_set_flags (g_getenv ("SSH_CONTACT_DEBUG"));
dbus = tp_dbus_daemon_dup (&error);
if (dbus == NULL)
goto OUT;
factory = (TpSimpleClientFactory *) tp_automatic_client_factory_new (dbus);
client = tp_simple_handler_new_with_factory (factory, FALSE, FALSE,
"SSHContact", FALSE, got_channel_cb, NULL, NULL);
tp_base_client_take_handler_filter (client, tp_asv_new (
TP_PROP_CHANNEL_CHANNEL_TYPE, G_TYPE_STRING,
TP_IFACE_CHANNEL_TYPE_STREAM_TUBE,
TP_PROP_CHANNEL_TARGET_HANDLE_TYPE, G_TYPE_UINT,
TP_HANDLE_TYPE_CONTACT,
TP_PROP_CHANNEL_TYPE_STREAM_TUBE_SERVICE, G_TYPE_STRING,
TUBE_SERVICE,
TP_PROP_CHANNEL_REQUESTED, G_TYPE_BOOLEAN,
FALSE,
NULL));
if (!tp_base_client_register (client, &error))
goto OUT;
loop = g_main_loop_new (NULL, FALSE);
g_main_loop_run (loop);
OUT:
if (error != NULL)
{
g_debug ("Error: %s", error->message);
success = FALSE;
}
tp_clear_pointer (&loop, g_main_loop_unref);
tp_clear_object (&dbus);
tp_clear_object (&factory);
tp_clear_object (&client);
g_clear_error (&error);
return success ? EXIT_SUCCESS : EXIT_FAILURE;
}
| freedesktop-unofficial-mirror/telepathy__telepathy-ssh-contact | src/service.c | C | gpl-2.0 | 5,471 |
/*****************************************************************************
* macroblock.c: macroblock encoding
*****************************************************************************
* Copyright (C) 2003-2017 x264 project
*
* Authors: Laurent Aimar <fenrir@via.ecp.fr>
* Loren Merritt <lorenm@u.washington.edu>
* Fiona Glaser <fiona@x264.com>
* Henrik Gramner <henrik@gramner.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "common/common.h"
#include "macroblock.h"
/* These chroma DC functions don't have assembly versions and are only used here. */
#define ZIG(i,y,x) level[i] = dct[x*2+y];
static inline void zigzag_scan_2x2_dc( dctcoef level[4], dctcoef dct[4] )
{
ZIG(0,0,0)
ZIG(1,0,1)
ZIG(2,1,0)
ZIG(3,1,1)
}
#undef ZIG
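/* Zigzag scan of the 2x4 chroma DC block used for 4:2:2 content. */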
static inline void zigzag_scan_2x4_dc( dctcoef level[8], dctcoef dct[8] )
{
level[0] = dct[0];
level[1] = dct[2];
level[2] = dct[1];
level[3] = dct[4];
level[4] = dct[6];
level[5] = dct[3];
level[6] = dct[5];
level[7] = dct[7];
}
#define IDCT_DEQUANT_2X2_START \
int d0 = dct[0] + dct[1]; \
int d1 = dct[2] + dct[3]; \
int d2 = dct[0] - dct[1]; \
int d3 = dct[2] - dct[3]; \
int dmf = dequant_mf[i_qp%6][0] << i_qp/6;
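/* Inverse 2x2 chroma DC transform + dequantization, writing the DC terms
 * back into the four 4x4 chroma blocks. */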
static inline void idct_dequant_2x2_dc( dctcoef dct[4], dctcoef dct4x4[4][16], int dequant_mf[6][16], int i_qp )
{
IDCT_DEQUANT_2X2_START
dct4x4[0][0] = (d0 + d1) * dmf >> 5;
dct4x4[1][0] = (d0 - d1) * dmf >> 5;
dct4x4[2][0] = (d2 + d3) * dmf >> 5;
dct4x4[3][0] = (d2 - d3) * dmf >> 5;
}
static inline void idct_dequant_2x2_dconly( dctcoef dct[4], int dequant_mf[6][16], int i_qp )
{
IDCT_DEQUANT_2X2_START
dct[0] = (d0 + d1) * dmf >> 5;
dct[1] = (d0 - d1) * dmf >> 5;
dct[2] = (d2 + d3) * dmf >> 5;
dct[3] = (d2 - d3) * dmf >> 5;
}
#undef IDCT_DEQUANT_2X2_START
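/* Forward 2x2 transform of the four chroma 4x4 DC terms; the DCs are
 * cleared in the source blocks afterwards. */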
static inline void dct2x2dc( dctcoef d[4], dctcoef dct4x4[4][16] )
{
int d0 = dct4x4[0][0] + dct4x4[1][0];
int d1 = dct4x4[2][0] + dct4x4[3][0];
int d2 = dct4x4[0][0] - dct4x4[1][0];
int d3 = dct4x4[2][0] - dct4x4[3][0];
d[0] = d0 + d1;
d[2] = d2 + d3;
d[1] = d0 - d1;
d[3] = d2 - d3;
dct4x4[0][0] = 0;
dct4x4[1][0] = 0;
dct4x4[2][0] = 0;
dct4x4[3][0] = 0;
}
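/* Return nonzero if any coefficient in v[0..i_count) is nonzero, testing one machine word at a time. */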
static ALWAYS_INLINE int array_non_zero( dctcoef *v, int i_count )
{
if( WORD_SIZE == 8 )
{
for( int i = 0; i < i_count; i += 8/sizeof(dctcoef) )
if( M64( &v[i] ) )
return 1;
}
else
{
for( int i = 0; i < i_count; i += 4/sizeof(dctcoef) )
if( M32( &v[i] ) )
return 1;
}
return 0;
}
/* All encoding functions must output the correct CBP and NNZ values.
* The entropy coding functions will check CBP first, then NNZ, before
* actually reading the DCT coefficients. NNZ still must be correct even
* if CBP is zero because of the use of NNZ values for context selection.
* "NNZ" need only be 0 or 1 rather than the exact coefficient count because
* that is only needed in CAVLC, and will be calculated by CAVLC's residual
* coding and stored as necessary. */
/* This means that decimation can be done merely by adjusting the CBP and NNZ
* rather than memsetting the coefficients. */
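/* Encode one plane of an I_16x16 macroblock: predict, transform the
 * residual with 4x4 DCTs plus a separate DC Hadamard, quantize
 * (optionally with trellis/decimation), and reconstruct into fdec. */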
static void x264_mb_encode_i16x16( x264_t *h, int p, int i_qp )
{
pixel *p_src = h->mb.pic.p_fenc[p];
pixel *p_dst = h->mb.pic.p_fdec[p];
ALIGNED_ARRAY_32( dctcoef, dct4x4,[16],[16] );
ALIGNED_ARRAY_32( dctcoef, dct_dc4x4,[16] );
int nz, block_cbp = 0;
int decimate_score = h->mb.b_dct_decimate ? 0 : 9;
int i_quant_cat = p ? CQM_4IC : CQM_4IY;
int i_mode = h->mb.i_intra16x16_pred_mode;
if( h->mb.b_lossless )
x264_predict_lossless_16x16( h, p, i_mode );
else
h->predict_16x16[i_mode]( h->mb.pic.p_fdec[p] );
if( h->mb.b_lossless )
{
for( int i = 0; i < 16; i++ )
{
int oe = block_idx_xy_fenc[i];
int od = block_idx_xy_fdec[i];
nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16*p+i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] );
h->mb.cache.non_zero_count[x264_scan8[16*p+i]] = nz;
block_cbp |= nz;
}
h->mb.i_cbp_luma |= block_cbp * 0xf;
h->mb.cache.non_zero_count[x264_scan8[LUMA_DC+p]] = array_non_zero( dct_dc4x4, 16 );
h->zigzagf.scan_4x4( h->dct.luma16x16_dc[p], dct_dc4x4 );
return;
}
CLEAR_16x16_NNZ( p );
h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );
if( h->mb.b_noise_reduction )
for( int idx = 0; idx < 16; idx++ )
h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
for( int idx = 0; idx < 16; idx++ )
{
dct_dc4x4[block_idx_xy_1d[idx]] = dct4x4[idx][0];
dct4x4[idx][0] = 0;
}
if( h->mb.b_trellis )
{
for( int idx = 0; idx < 16; idx++ )
if( x264_quant_4x4_trellis( h, dct4x4[idx], i_quant_cat, i_qp, ctx_cat_plane[DCT_LUMA_AC][p], 1, !!p, idx ) )
{
block_cbp = 0xf;
h->zigzagf.scan_4x4( h->dct.luma4x4[16*p+idx], dct4x4[idx] );
h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[i_quant_cat], i_qp );
if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16*p+idx] );
h->mb.cache.non_zero_count[x264_scan8[16*p+idx]] = 1;
}
}
else
{
for( int i8x8 = 0; i8x8 < 4; i8x8++ )
{
nz = h->quantf.quant_4x4x4( &dct4x4[i8x8*4], h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
if( nz )
{
block_cbp = 0xf;
FOREACH_BIT( idx, i8x8*4, nz )
{
h->zigzagf.scan_4x4( h->dct.luma4x4[16*p+idx], dct4x4[idx] );
h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[i_quant_cat], i_qp );
if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16*p+idx] );
h->mb.cache.non_zero_count[x264_scan8[16*p+idx]] = 1;
}
}
}
}
/* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
/* More useful with CAVLC, but still useful with CABAC. */
if( decimate_score < 6 )
{
CLEAR_16x16_NNZ( p );
block_cbp = 0;
}
else
h->mb.i_cbp_luma |= block_cbp;
h->dctf.dct4x4dc( dct_dc4x4 );
if( h->mb.b_trellis )
nz = x264_quant_luma_dc_trellis( h, dct_dc4x4, i_quant_cat, i_qp, ctx_cat_plane[DCT_LUMA_DC][p], 1, LUMA_DC+p );
else
nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[i_quant_cat][i_qp][0]>>1, h->quant4_bias[i_quant_cat][i_qp][0]<<1 );
h->mb.cache.non_zero_count[x264_scan8[LUMA_DC+p]] = nz;
if( nz )
{
h->zigzagf.scan_4x4( h->dct.luma16x16_dc[p], dct_dc4x4 );
/* output samples to fdec */
h->dctf.idct4x4dc( dct_dc4x4 );
h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[i_quant_cat], i_qp ); /* XXX not inversed */
if( block_cbp )
for( int i = 0; i < 16; i++ )
dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]];
}
/* put pixels to fdec */
if( block_cbp )
h->dctf.add16x16_idct( p_dst, dct4x4 );
else if( nz )
h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}
/* Round down coefficients losslessly in DC-only chroma blocks.
* Unlike luma blocks, this can't be done with a lookup table or
* other shortcut technique because of the interdependencies
* between the coefficients due to the chroma DC transform. */
static ALWAYS_INLINE int x264_mb_optimize_chroma_dc( x264_t *h, dctcoef *dct_dc, int dequant_mf[6][16], int i_qp, int chroma422 )
{
int dmf = dequant_mf[i_qp%6][0] << i_qp/6;
/* If the QP is too high, there's no benefit to rounding optimization. */
if( dmf > 32*64 )
return 1;
if( chroma422 )
return h->quantf.optimize_chroma_2x4_dc( dct_dc, dmf );
else
return h->quantf.optimize_chroma_2x2_dc( dct_dc, dmf );
}
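/* Encode both chroma planes (4:2:0 or 4:2:2): transform, chroma DC handling,
 * quantization with optional early termination and decimation, and
 * reconstruction into fdec. */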
static ALWAYS_INLINE void x264_mb_encode_chroma_internal( x264_t *h, int b_inter, int i_qp, int chroma422 )
{
int nz, nz_dc;
int b_decimate = b_inter && h->mb.b_dct_decimate;
int (*dequant_mf)[16] = h->dequant4_mf[CQM_4IC + b_inter];
ALIGNED_ARRAY_16( dctcoef, dct_dc,[8] );
h->mb.i_cbp_chroma = 0;
h->nr_count[2] += h->mb.b_noise_reduction * 4;
M16( &h->mb.cache.non_zero_count[x264_scan8[16]] ) = 0;
M16( &h->mb.cache.non_zero_count[x264_scan8[18]] ) = 0;
M16( &h->mb.cache.non_zero_count[x264_scan8[32]] ) = 0;
M16( &h->mb.cache.non_zero_count[x264_scan8[34]] ) = 0;
if( chroma422 )
{
M16( &h->mb.cache.non_zero_count[x264_scan8[24]] ) = 0;
M16( &h->mb.cache.non_zero_count[x264_scan8[26]] ) = 0;
M16( &h->mb.cache.non_zero_count[x264_scan8[40]] ) = 0;
M16( &h->mb.cache.non_zero_count[x264_scan8[42]] ) = 0;
}
/* Early termination: check variance of chroma residual before encoding.
* Don't bother trying early termination at low QPs.
* Values are experimentally derived. */
if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) && !h->mb.b_noise_reduction )
{
int thresh = chroma422 ? (x264_lambda2_tab[i_qp] + 16) >> 5 : (x264_lambda2_tab[i_qp] + 32) >> 6;
int ssd[2];
int chromapix = chroma422 ? PIXEL_8x16 : PIXEL_8x8;
int score = h->pixf.var2[chromapix]( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
if( score < thresh*4 )
score += h->pixf.var2[chromapix]( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
if( score < thresh*4 )
{
h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+0]] = 0;
h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+1]] = 0;
for( int ch = 0; ch < 2; ch++ )
{
if( ssd[ch] > thresh )
{
pixel *p_src = h->mb.pic.p_fenc[1+ch];
pixel *p_dst = h->mb.pic.p_fdec[1+ch];
if( chroma422 )
/* Cannot be replaced by two calls to sub8x8_dct_dc since the hadamard transform is different */
h->dctf.sub8x16_dct_dc( dct_dc, p_src, p_dst );
else
h->dctf.sub8x8_dct_dc( dct_dc, p_src, p_dst );
if( h->mb.b_trellis )
nz_dc = x264_quant_chroma_dc_trellis( h, dct_dc, i_qp+3*chroma422, !b_inter, CHROMA_DC+ch );
else
{
nz_dc = 0;
for( int i = 0; i <= chroma422; i++ )
nz_dc |= h->quantf.quant_2x2_dc( &dct_dc[4*i], h->quant4_mf[CQM_4IC+b_inter][i_qp+3*chroma422][0] >> 1,
h->quant4_bias[CQM_4IC+b_inter][i_qp+3*chroma422][0] << 1 );
}
if( nz_dc )
{
if( !x264_mb_optimize_chroma_dc( h, dct_dc, dequant_mf, i_qp+3*chroma422, chroma422 ) )
continue;
h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+ch]] = 1;
if( chroma422 )
{
zigzag_scan_2x4_dc( h->dct.chroma_dc[ch], dct_dc );
h->quantf.idct_dequant_2x4_dconly( dct_dc, dequant_mf, i_qp+3 );
}
else
{
zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct_dc );
idct_dequant_2x2_dconly( dct_dc, dequant_mf, i_qp );
}
for( int i = 0; i <= chroma422; i++ )
h->dctf.add8x8_idct_dc( p_dst + 8*i*FDEC_STRIDE, &dct_dc[4*i] );
h->mb.i_cbp_chroma = 1;
}
}
}
return;
}
}
for( int ch = 0; ch < 2; ch++ )
{
pixel *p_src = h->mb.pic.p_fenc[1+ch];
pixel *p_dst = h->mb.pic.p_fdec[1+ch];
int i_decimate_score = b_decimate ? 0 : 7;
int nz_ac = 0;
ALIGNED_ARRAY_32( dctcoef, dct4x4,[8],[16] );
if( h->mb.b_lossless )
{
static const uint8_t chroma422_scan[8] = { 0, 2, 1, 5, 3, 6, 4, 7 };
for( int i = 0; i < (chroma422?8:4); i++ )
{
int oe = 4*(i&1) + 4*(i>>1)*FENC_STRIDE;
int od = 4*(i&1) + 4*(i>>1)*FDEC_STRIDE;
nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+(chroma422?i&4:0)+ch*16], p_src+oe, p_dst+od,
&h->dct.chroma_dc[ch][chroma422?chroma422_scan[i]:i] );
h->mb.cache.non_zero_count[x264_scan8[16+i+(chroma422?i&4:0)+ch*16]] = nz;
h->mb.i_cbp_chroma |= nz;
}
h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+ch]] = array_non_zero( h->dct.chroma_dc[ch], chroma422?8:4 );
continue;
}
for( int i = 0; i <= chroma422; i++ )
h->dctf.sub8x8_dct( &dct4x4[4*i], p_src + 8*i*FENC_STRIDE, p_dst + 8*i*FDEC_STRIDE );
if( h->mb.b_noise_reduction )
for( int i = 0; i < (chroma422?8:4); i++ )
h->quantf.denoise_dct( dct4x4[i], h->nr_residual_sum[2], h->nr_offset[2], 16 );
if( chroma422 )
h->dctf.dct2x4dc( dct_dc, dct4x4 );
else
dct2x2dc( dct_dc, dct4x4 );
/* calculate dct coeffs */
for( int i8x8 = 0; i8x8 < (chroma422?2:1); i8x8++ )
{
if( h->mb.b_trellis )
{
for( int i4x4 = 0; i4x4 < 4; i4x4++ )
{
if( x264_quant_4x4_trellis( h, dct4x4[i8x8*4+i4x4], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 ) )
{
int idx = 16+ch*16+i8x8*8+i4x4;
h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[i8x8*4+i4x4] );
h->quantf.dequant_4x4( dct4x4[i8x8*4+i4x4], dequant_mf, i_qp );
if( i_decimate_score < 7 )
i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[idx] );
h->mb.cache.non_zero_count[x264_scan8[idx]] = 1;
nz_ac = 1;
}
}
}
else
{
nz = h->quantf.quant_4x4x4( &dct4x4[i8x8*4], h->quant4_mf[CQM_4IC+b_inter][i_qp],
h->quant4_bias[CQM_4IC+b_inter][i_qp] );
nz_ac |= nz;
FOREACH_BIT( i4x4, 0, nz )
{
int idx = 16+ch*16+i8x8*8+i4x4;
h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[i8x8*4+i4x4] );
h->quantf.dequant_4x4( dct4x4[i8x8*4+i4x4], dequant_mf, i_qp );
if( i_decimate_score < 7 )
i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[idx] );
h->mb.cache.non_zero_count[x264_scan8[idx]] = 1;
}
}
}
if( h->mb.b_trellis )
nz_dc = x264_quant_chroma_dc_trellis( h, dct_dc, i_qp+3*chroma422, !b_inter, CHROMA_DC+ch );
else
{
nz_dc = 0;
for( int i = 0; i <= chroma422; i++ )
nz_dc |= h->quantf.quant_2x2_dc( &dct_dc[4*i], h->quant4_mf[CQM_4IC+b_inter][i_qp+3*chroma422][0] >> 1,
h->quant4_bias[CQM_4IC+b_inter][i_qp+3*chroma422][0] << 1 );
}
h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+ch]] = nz_dc;
if( i_decimate_score < 7 || !nz_ac )
{
/* Decimate the block */
M16( &h->mb.cache.non_zero_count[x264_scan8[16+16*ch]] ) = 0;
M16( &h->mb.cache.non_zero_count[x264_scan8[18+16*ch]] ) = 0;
if( chroma422 )
{
M16( &h->mb.cache.non_zero_count[x264_scan8[24+16*ch]] ) = 0;
M16( &h->mb.cache.non_zero_count[x264_scan8[26+16*ch]] ) = 0;
}
if( !nz_dc ) /* Whole block is empty */
continue;
if( !x264_mb_optimize_chroma_dc( h, dct_dc, dequant_mf, i_qp+3*chroma422, chroma422 ) )
{
h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+ch]] = 0;
continue;
}
/* DC-only */
if( chroma422 )
{
zigzag_scan_2x4_dc( h->dct.chroma_dc[ch], dct_dc );
h->quantf.idct_dequant_2x4_dconly( dct_dc, dequant_mf, i_qp+3 );
}
else
{
zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct_dc );
idct_dequant_2x2_dconly( dct_dc, dequant_mf, i_qp );
}
for( int i = 0; i <= chroma422; i++ )
h->dctf.add8x8_idct_dc( p_dst + 8*i*FDEC_STRIDE, &dct_dc[4*i] );
}
else
{
h->mb.i_cbp_chroma = 1;
if( nz_dc )
{
if( chroma422 )
{
zigzag_scan_2x4_dc( h->dct.chroma_dc[ch], dct_dc );
h->quantf.idct_dequant_2x4_dc( dct_dc, dct4x4, dequant_mf, i_qp+3 );
}
else
{
zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct_dc );
idct_dequant_2x2_dc( dct_dc, dct4x4, dequant_mf, i_qp );
}
}
for( int i = 0; i <= chroma422; i++ )
h->dctf.add8x8_idct( p_dst + 8*i*FDEC_STRIDE, &dct4x4[4*i] );
}
}
/* 0 = none, 1 = DC only, 2 = DC+AC */
h->mb.i_cbp_chroma += (h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+0]] |
h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+1]] | h->mb.i_cbp_chroma);
}
void x264_mb_encode_chroma( x264_t *h, int b_inter, int i_qp )
{
if( CHROMA_FORMAT == CHROMA_420 )
x264_mb_encode_chroma_internal( h, b_inter, i_qp, 0 );
else
x264_mb_encode_chroma_internal( h, b_inter, i_qp, 1 );
}
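/* Reset all NNZ and CBP state for a macroblock encoded as skip. */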
static void x264_macroblock_encode_skip( x264_t *h )
{
M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[16+ 0]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[16+ 2]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[32+ 0]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[32+ 2]] ) = 0;
if( CHROMA_FORMAT >= CHROMA_422 )
{
M32( &h->mb.cache.non_zero_count[x264_scan8[16+ 8]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[16+10]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[32+ 8]] ) = 0;
M32( &h->mb.cache.non_zero_count[x264_scan8[32+10]] ) = 0;
}
h->mb.i_cbp_luma = 0;
h->mb.i_cbp_chroma = 0;
h->mb.cbp[h->mb.i_mb_xy] = 0;
}
/*****************************************************************************
* Intra prediction for predictive lossless mode.
*****************************************************************************/
void x264_predict_lossless_chroma( x264_t *h, int i_mode )
{
int height = 16 >> CHROMA_V_SHIFT;
if( i_mode == I_PRED_CHROMA_V )
{
h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-FENC_STRIDE, FENC_STRIDE, height );
h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-FENC_STRIDE, FENC_STRIDE, height );
memcpy( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[1]-FDEC_STRIDE, 8*sizeof(pixel) );
memcpy( h->mb.pic.p_fdec[2], h->mb.pic.p_fdec[2]-FDEC_STRIDE, 8*sizeof(pixel) );
}
else if( i_mode == I_PRED_CHROMA_H )
{
h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-1, FENC_STRIDE, height );
h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-1, FENC_STRIDE, height );
x264_copy_column8( h->mb.pic.p_fdec[1]+4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+4*FDEC_STRIDE-1 );
x264_copy_column8( h->mb.pic.p_fdec[2]+4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+4*FDEC_STRIDE-1 );
if( CHROMA_FORMAT == CHROMA_422 )
{
x264_copy_column8( h->mb.pic.p_fdec[1]+12*FDEC_STRIDE, h->mb.pic.p_fdec[1]+12*FDEC_STRIDE-1 );
x264_copy_column8( h->mb.pic.p_fdec[2]+12*FDEC_STRIDE, h->mb.pic.p_fdec[2]+12*FDEC_STRIDE-1 );
}
}
else
{
h->predict_chroma[i_mode]( h->mb.pic.p_fdec[1] );
h->predict_chroma[i_mode]( h->mb.pic.p_fdec[2] );
}
}
void x264_predict_lossless_4x4( x264_t *h, pixel *p_dst, int p, int idx, int i_mode )
{
int stride = h->fenc->i_stride[p] << MB_INTERLACED;
pixel *p_src = h->mb.pic.p_fenc_plane[p] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;
if( i_mode == I_PRED_4x4_V )
h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
else if( i_mode == I_PRED_4x4_H )
h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
else
h->predict_4x4[i_mode]( p_dst );
}
void x264_predict_lossless_8x8( x264_t *h, pixel *p_dst, int p, int idx, int i_mode, pixel edge[36] )
{
int stride = h->fenc->i_stride[p] << MB_INTERLACED;
pixel *p_src = h->mb.pic.p_fenc_plane[p] + (idx&1)*8 + (idx>>1)*8*stride;
if( i_mode == I_PRED_8x8_V )
h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
else if( i_mode == I_PRED_8x8_H )
h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
else
h->predict_8x8[i_mode]( p_dst, edge );
}
void x264_predict_lossless_16x16( x264_t *h, int p, int i_mode )
{
int stride = h->fenc->i_stride[p] << MB_INTERLACED;
if( i_mode == I_PRED_16x16_V )
h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[p], FDEC_STRIDE, h->mb.pic.p_fenc_plane[p]-stride, stride, 16 );
else if( i_mode == I_PRED_16x16_H )
h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[p], FDEC_STRIDE, h->mb.pic.p_fenc_plane[p]-1, stride, 16 );
else
h->predict_16x16[i_mode]( h->mb.pic.p_fdec[p] );
}
/*****************************************************************************
* x264_macroblock_encode:
*****************************************************************************/
static ALWAYS_INLINE void x264_macroblock_encode_internal( x264_t *h, int plane_count, int chroma )
{
int i_qp = h->mb.i_qp;
int b_decimate = h->mb.b_dct_decimate;
int b_force_no_skip = 0;
int nz;
h->mb.i_cbp_luma = 0;
for( int p = 0; p < plane_count; p++ )
h->mb.cache.non_zero_count[x264_scan8[LUMA_DC+p]] = 0;
if( h->mb.i_type == I_PCM )
{
/* if PCM is chosen, we need to store reconstructed frame data */
for( int p = 0; p < plane_count; p++ )
h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[p], FDEC_STRIDE, h->mb.pic.p_fenc[p], FENC_STRIDE, 16 );
if( chroma )
{
int height = 16 >> CHROMA_V_SHIFT;
h->mc.copy[PIXEL_8x8] ( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1], FENC_STRIDE, height );
h->mc.copy[PIXEL_8x8] ( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2], FENC_STRIDE, height );
}
return;
}
if( !h->mb.b_allow_skip )
{
b_force_no_skip = 1;
if( IS_SKIP(h->mb.i_type) )
{
if( h->mb.i_type == P_SKIP )
h->mb.i_type = P_L0;
else if( h->mb.i_type == B_SKIP )
h->mb.i_type = B_DIRECT;
}
}
if( h->mb.i_type == P_SKIP )
{
/* don't do pskip motion compensation if it was already done in macroblock_analyse */
if( !h->mb.b_skip_mc )
{
int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
h->mb.mv_min[0], h->mb.mv_max[0] );
int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
h->mb.mv_min[1], h->mb.mv_max[1] );
for( int p = 0; p < plane_count; p++ )
h->mc.mc_luma( h->mb.pic.p_fdec[p], FDEC_STRIDE,
&h->mb.pic.p_fref[0][0][p*4], h->mb.pic.i_stride[p],
mvx, mvy, 16, 16, &h->sh.weight[0][p] );
if( chroma )
{
int v_shift = CHROMA_V_SHIFT;
int height = 16 >> v_shift;
/* Special case for mv0, which is (of course) very common in P-skip mode. */
if( mvx | mvy )
h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
mvx, 2*mvy>>v_shift, 8, height );
else
h->mc.load_deinterleave_chroma_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4],
h->mb.pic.i_stride[1], height );
if( h->sh.weight[0][1].weightfn )
h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
h->mb.pic.p_fdec[1], FDEC_STRIDE,
&h->sh.weight[0][1], height );
if( h->sh.weight[0][2].weightfn )
h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
h->mb.pic.p_fdec[2], FDEC_STRIDE,
&h->sh.weight[0][2], height );
}
}
x264_macroblock_encode_skip( h );
return;
}
if( h->mb.i_type == B_SKIP )
{
/* don't do bskip motion compensation if it was already done in macroblock_analyse */
if( !h->mb.b_skip_mc )
x264_mb_mc( h );
x264_macroblock_encode_skip( h );
return;
}
if( h->mb.i_type == I_16x16 )
{
h->mb.b_transform_8x8 = 0;
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
x264_mb_encode_i16x16( h, p, i_qp );
}
else if( h->mb.i_type == I_8x8 )
{
h->mb.b_transform_8x8 = 1;
/* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
if( h->mb.i_skip_intra )
{
h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
/* In RD mode, restore the now-overwritten DCT data. */
if( h->mb.i_skip_intra == 2 )
h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
}
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
{
for( int i = (p == 0 && h->mb.i_skip_intra) ? 3 : 0; i < 4; i++ )
{
int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
x264_mb_encode_i8x8( h, p, i, i_qp, i_mode, NULL, 1 );
}
}
}
else if( h->mb.i_type == I_4x4 )
{
h->mb.b_transform_8x8 = 0;
/* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
if( h->mb.i_skip_intra )
{
h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
/* In RD mode, restore the now-overwritten DCT data. */
if( h->mb.i_skip_intra == 2 )
h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
}
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
{
for( int i = (p == 0 && h->mb.i_skip_intra) ? 15 : 0; i < 16; i++ )
{
pixel *p_dst = &h->mb.pic.p_fdec[p][block_idx_xy_fdec[i]];
int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];
if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
/* emulate missing topright samples */
MPIXEL_X4( &p_dst[4-FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst[3-FDEC_STRIDE] );
x264_mb_encode_i4x4( h, p, i, i_qp, i_mode, 1 );
}
}
}
else /* Inter MB */
{
int i_decimate_mb = 0;
/* Don't repeat motion compensation if it was already done in non-RD transform analysis */
if( !h->mb.b_skip_mc )
x264_mb_mc( h );
if( h->mb.b_lossless )
{
if( h->mb.b_transform_8x8 )
for( int p = 0; p < plane_count; p++ )
for( int i8x8 = 0; i8x8 < 4; i8x8++ )
{
int x = i8x8&1;
int y = i8x8>>1;
nz = h->zigzagf.sub_8x8( h->dct.luma8x8[p*4+i8x8], h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE,
h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE );
STORE_8x8_NNZ( p, i8x8, nz );
h->mb.i_cbp_luma |= nz << i8x8;
}
else
for( int p = 0; p < plane_count; p++ )
for( int i4x4 = 0; i4x4 < 16; i4x4++ )
{
nz = h->zigzagf.sub_4x4( h->dct.luma4x4[p*16+i4x4],
h->mb.pic.p_fenc[p]+block_idx_xy_fenc[i4x4],
h->mb.pic.p_fdec[p]+block_idx_xy_fdec[i4x4] );
h->mb.cache.non_zero_count[x264_scan8[p*16+i4x4]] = nz;
h->mb.i_cbp_luma |= nz << (i4x4>>2);
}
}
else if( h->mb.b_transform_8x8 )
{
ALIGNED_ARRAY_32( dctcoef, dct8x8,[4],[64] );
b_decimate &= !h->mb.b_trellis || !h->param.b_cabac; // 8x8 trellis is inherently optimal decimation for CABAC
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
{
int quant_cat = p ? CQM_8PC : CQM_8PY;
CLEAR_16x16_NNZ( p );
h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[p], h->mb.pic.p_fdec[p] );
h->nr_count[1+!!p*2] += h->mb.b_noise_reduction * 4;
int plane_cbp = 0;
for( int idx = 0; idx < 4; idx++ )
{
nz = x264_quant_8x8( h, dct8x8[idx], i_qp, ctx_cat_plane[DCT_LUMA_8x8][p], 0, p, idx );
if( nz )
{
h->zigzagf.scan_8x8( h->dct.luma8x8[p*4+idx], dct8x8[idx] );
if( b_decimate )
{
int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[p*4+idx] );
i_decimate_mb += i_decimate_8x8;
if( i_decimate_8x8 >= 4 )
plane_cbp |= 1<<idx;
}
else
plane_cbp |= 1<<idx;
}
}
if( i_decimate_mb >= 6 || !b_decimate )
{
h->mb.i_cbp_luma |= plane_cbp;
FOREACH_BIT( idx, 0, plane_cbp )
{
h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[quant_cat], i_qp );
h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[p][8*(idx&1) + 8*(idx>>1)*FDEC_STRIDE], dct8x8[idx] );
STORE_8x8_NNZ( p, idx, 1 );
}
}
}
}
else
{
ALIGNED_ARRAY_32( dctcoef, dct4x4,[16],[16] );
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
{
int quant_cat = p ? CQM_4PC : CQM_4PY;
CLEAR_16x16_NNZ( p );
h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[p], h->mb.pic.p_fdec[p] );
if( h->mb.b_noise_reduction )
{
h->nr_count[0+!!p*2] += 16;
for( int idx = 0; idx < 16; idx++ )
h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0+!!p*2], h->nr_offset[0+!!p*2], 16 );
}
int plane_cbp = 0;
for( int i8x8 = 0; i8x8 < 4; i8x8++ )
{
int i_decimate_8x8 = b_decimate ? 0 : 6;
int nnz8x8 = 0;
if( h->mb.b_trellis )
{
for( int i4x4 = 0; i4x4 < 4; i4x4++ )
{
int idx = i8x8*4+i4x4;
if( x264_quant_4x4_trellis( h, dct4x4[idx], quant_cat, i_qp, ctx_cat_plane[DCT_LUMA_4x4][p], 0, !!p, p*16+idx ) )
{
h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+idx], dct4x4[idx] );
h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[quant_cat], i_qp );
if( i_decimate_8x8 < 6 )
i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[p*16+idx] );
h->mb.cache.non_zero_count[x264_scan8[p*16+idx]] = 1;
nnz8x8 = 1;
}
}
}
else
{
nnz8x8 = nz = h->quantf.quant_4x4x4( &dct4x4[i8x8*4], h->quant4_mf[quant_cat][i_qp], h->quant4_bias[quant_cat][i_qp] );
if( nz )
{
FOREACH_BIT( idx, i8x8*4, nz )
{
h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+idx], dct4x4[idx] );
h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[quant_cat], i_qp );
if( i_decimate_8x8 < 6 )
i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[p*16+idx] );
h->mb.cache.non_zero_count[x264_scan8[p*16+idx]] = 1;
}
}
}
if( nnz8x8 )
{
i_decimate_mb += i_decimate_8x8;
if( i_decimate_8x8 < 4 )
STORE_8x8_NNZ( p, i8x8, 0 );
else
plane_cbp |= 1<<i8x8;
}
}
if( i_decimate_mb < 6 )
{
plane_cbp = 0;
CLEAR_16x16_NNZ( p );
}
else
{
h->mb.i_cbp_luma |= plane_cbp;
FOREACH_BIT( i8x8, 0, plane_cbp )
{
h->dctf.add8x8_idct( &h->mb.pic.p_fdec[p][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
}
}
}
}
}
/* encode chroma */
if( chroma )
{
if( IS_INTRA( h->mb.i_type ) )
{
int i_mode = h->mb.i_chroma_pred_mode;
if( h->mb.b_lossless )
x264_predict_lossless_chroma( h, i_mode );
else
{
h->predict_chroma[i_mode]( h->mb.pic.p_fdec[1] );
h->predict_chroma[i_mode]( h->mb.pic.p_fdec[2] );
}
}
/* encode the 8x8 blocks */
x264_mb_encode_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );
}
else
h->mb.i_cbp_chroma = 0;
/* store cbp */
int cbp = h->mb.i_cbp_chroma << 4 | h->mb.i_cbp_luma;
if( h->param.b_cabac )
cbp |= h->mb.cache.non_zero_count[x264_scan8[LUMA_DC ]] << 8
| h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+0]] << 9
| h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+1]] << 10;
h->mb.cbp[h->mb.i_mb_xy] = cbp;
/* Check for P_SKIP
* XXX: in the me perhaps we should take x264_mb_predict_mv_pskip into account
* (if multiple mv give same result)*/
if( !b_force_no_skip )
{
if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
!(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
&& h->mb.cache.ref[0][x264_scan8[0]] == 0 )
{
h->mb.i_type = P_SKIP;
}
/* Check for B_SKIP */
if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
{
h->mb.i_type = B_SKIP;
}
}
}
void x264_macroblock_encode( x264_t *h )
{
if( CHROMA444 )
x264_macroblock_encode_internal( h, 3, 0 );
else
x264_macroblock_encode_internal( h, 1, 1 );
}
/*****************************************************************************
* x264_macroblock_probe_skip:
* Check if the current MB could be encoded as a [PB]_SKIP
*****************************************************************************/
static ALWAYS_INLINE int x264_macroblock_probe_skip_internal( x264_t *h, int b_bidir, int plane_count, int chroma )
{
ALIGNED_ARRAY_32( dctcoef, dct4x4,[8],[16] );
ALIGNED_ARRAY_16( dctcoef, dctscan,[16] );
ALIGNED_4( int16_t mvp[2] );
int i_qp = h->mb.i_qp;
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
{
int quant_cat = p ? CQM_4PC : CQM_4PY;
if( !b_bidir )
{
/* Get the MV */
mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );
/* Motion compensation */
h->mc.mc_luma( h->mb.pic.p_fdec[p], FDEC_STRIDE,
&h->mb.pic.p_fref[0][0][p*4], h->mb.pic.i_stride[p],
mvp[0], mvp[1], 16, 16, &h->sh.weight[0][p] );
}
for( int i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
{
int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[p] + fenc_offset,
h->mb.pic.p_fdec[p] + fdec_offset );
if( h->mb.b_noise_reduction )
for( int i4x4 = 0; i4x4 < 4; i4x4++ )
h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[0+!!p*2], h->nr_offset[0+!!p*2], 16 );
int nz = h->quantf.quant_4x4x4( dct4x4, h->quant4_mf[quant_cat][i_qp], h->quant4_bias[quant_cat][i_qp] );
FOREACH_BIT( idx, 0, nz )
{
h->zigzagf.scan_4x4( dctscan, dct4x4[idx] );
i_decimate_mb += h->quantf.decimate_score16( dctscan );
if( i_decimate_mb >= 6 )
return 0;
}
}
}
if( chroma == CHROMA_420 || chroma == CHROMA_422 )
{
i_qp = h->mb.i_chroma_qp;
int chroma422 = chroma == CHROMA_422;
int thresh = chroma422 ? (x264_lambda2_tab[i_qp] + 16) >> 5 : (x264_lambda2_tab[i_qp] + 32) >> 6;
int ssd;
ALIGNED_ARRAY_16( dctcoef, dct_dc,[8] );
if( !b_bidir )
{
/* Special case for mv0, which is (of course) very common in P-skip mode. */
if( M32( mvp ) )
h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
mvp[0], mvp[1]<<chroma422, 8, chroma422?16:8 );
else
h->mc.load_deinterleave_chroma_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4],
h->mb.pic.i_stride[1], chroma422?16:8 );
}
for( int ch = 0; ch < 2; ch++ )
{
pixel *p_src = h->mb.pic.p_fenc[1+ch];
pixel *p_dst = h->mb.pic.p_fdec[1+ch];
if( !b_bidir && h->sh.weight[0][1+ch].weightfn )
h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
&h->sh.weight[0][1+ch], chroma422?16:8 );
/* there is almost never a termination during chroma, but we can't avoid the check entirely */
/* so instead we check SSD and skip the actual check if the score is low enough. */
ssd = h->pixf.ssd[chroma422?PIXEL_8x16:PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
if( ssd < thresh )
continue;
/* The vast majority of chroma checks will terminate during the DC check or the higher
* threshold check, so we can save time by doing a DC-only DCT. */
if( h->mb.b_noise_reduction )
{
for( int i = 0; i <= chroma422; i++ )
h->dctf.sub8x8_dct( &dct4x4[4*i], p_src + 8*i*FENC_STRIDE, p_dst + 8*i*FDEC_STRIDE );
for( int i4x4 = 0; i4x4 < (chroma422?8:4); i4x4++ )
{
h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[2], h->nr_offset[2], 16 );
dct_dc[i4x4] = dct4x4[i4x4][0];
dct4x4[i4x4][0] = 0;
}
}
else
{
if( chroma422 )
h->dctf.sub8x16_dct_dc( dct_dc, p_src, p_dst );
else
h->dctf.sub8x8_dct_dc( dct_dc, p_src, p_dst );
}
for( int i = 0; i <= chroma422; i++ )
if( h->quantf.quant_2x2_dc( &dct_dc[4*i], h->quant4_mf[CQM_4PC][i_qp+3*chroma422][0] >> 1,
h->quant4_bias[CQM_4PC][i_qp+3*chroma422][0] << 1 ) )
return 0;
/* If there wasn't a termination in DC, we can check against a much higher threshold. */
if( ssd < thresh*4 )
continue;
if( !h->mb.b_noise_reduction )
for( int i = 0; i <= chroma422; i++ )
{
h->dctf.sub8x8_dct( &dct4x4[4*i], p_src + 8*i*FENC_STRIDE, p_dst + 8*i*FDEC_STRIDE );
dct4x4[i*4+0][0] = 0;
dct4x4[i*4+1][0] = 0;
dct4x4[i*4+2][0] = 0;
dct4x4[i*4+3][0] = 0;
}
/* calculate dct coeffs */
for( int i8x8 = 0, i_decimate_mb = 0; i8x8 < (chroma422?2:1); i8x8++ )
{
int nz = h->quantf.quant_4x4x4( &dct4x4[i8x8*4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );
FOREACH_BIT( idx, i8x8*4, nz )
{
h->zigzagf.scan_4x4( dctscan, dct4x4[idx] );
i_decimate_mb += h->quantf.decimate_score15( dctscan );
if( i_decimate_mb >= 7 )
return 0;
}
}
}
}
h->mb.b_skip_mc = 1;
return 1;
}
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
if( CHROMA_FORMAT == CHROMA_444 )
return x264_macroblock_probe_skip_internal( h, b_bidir, 3, CHROMA_444 );
else if( CHROMA_FORMAT == CHROMA_422 )
return x264_macroblock_probe_skip_internal( h, b_bidir, 1, CHROMA_422 );
else
return x264_macroblock_probe_skip_internal( h, b_bidir, 1, CHROMA_420 );
}
/****************************************************************************
* DCT-domain noise reduction / adaptive deadzone
* from libavcodec
****************************************************************************/
void x264_noise_reduction_update( x264_t *h )
{
h->nr_offset = h->nr_offset_denoise;
h->nr_residual_sum = h->nr_residual_sum_buf[0];
h->nr_count = h->nr_count_buf[0];
for( int cat = 0; cat < 3 + CHROMA444; cat++ )
{
int dct8x8 = cat&1;
int size = dct8x8 ? 64 : 16;
const uint32_t *weight = dct8x8 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
if( h->nr_count[cat] > (dct8x8 ? (1<<16) : (1<<18)) )
{
for( int i = 0; i < size; i++ )
h->nr_residual_sum[cat][i] >>= 1;
h->nr_count[cat] >>= 1;
}
for( int i = 0; i < size; i++ )
h->nr_offset[cat][i] =
((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
+ h->nr_residual_sum[cat][i]/2)
/ ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
/* Don't denoise DC coefficients */
h->nr_offset[cat][0] = 0;
}
}
/*****************************************************************************
* RD only; 4 calls to this do not make up for one macroblock_encode.
* doesn't transform chroma dc.
*****************************************************************************/
static ALWAYS_INLINE void x264_macroblock_encode_p8x8_internal( x264_t *h, int i8, int plane_count, int chroma )
{
int b_decimate = h->mb.b_dct_decimate;
int i_qp = h->mb.i_qp;
int x = i8&1;
int y = i8>>1;
int nz;
int chroma422 = chroma == CHROMA_422;
h->mb.i_cbp_chroma = 0;
h->mb.i_cbp_luma &= ~(1 << i8);
if( !h->mb.b_skip_mc )
x264_mb_mc_8x8( h, i8 );
if( h->mb.b_lossless )
{
for( int p = 0; p < plane_count; p++ )
{
pixel *p_fenc = h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE;
pixel *p_fdec = h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE;
int nnz8x8 = 0;
if( h->mb.b_transform_8x8 )
{
nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[4*p+i8], p_fenc, p_fdec );
STORE_8x8_NNZ( p, i8, nnz8x8 );
}
else
{
for( int i4 = i8*4; i4 < i8*4+4; i4++ )
{
nz = h->zigzagf.sub_4x4( h->dct.luma4x4[16*p+i4],
h->mb.pic.p_fenc[p]+block_idx_xy_fenc[i4],
h->mb.pic.p_fdec[p]+block_idx_xy_fdec[i4] );
h->mb.cache.non_zero_count[x264_scan8[16*p+i4]] = nz;
nnz8x8 |= nz;
}
}
h->mb.i_cbp_luma |= nnz8x8 << i8;
}
if( chroma == CHROMA_420 || chroma == CHROMA_422 )
{
for( int ch = 0; ch < 2; ch++ )
{
dctcoef dc;
pixel *p_fenc = h->mb.pic.p_fenc[1+ch] + 4*x + (chroma422?8:4)*y*FENC_STRIDE;
pixel *p_fdec = h->mb.pic.p_fdec[1+ch] + 4*x + (chroma422?8:4)*y*FDEC_STRIDE;
for( int i4x4 = 0; i4x4 <= chroma422; i4x4++ )
{
int offset = chroma422 ? 8*y + 2*i4x4 + x : i8;
nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+offset+ch*16], p_fenc+4*i4x4*FENC_STRIDE, p_fdec+4*i4x4*FDEC_STRIDE, &dc );
h->mb.cache.non_zero_count[x264_scan8[16+offset+ch*16]] = nz;
}
}
h->mb.i_cbp_chroma = 0x02;
}
}
else
{
if( h->mb.b_transform_8x8 )
{
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
{
int quant_cat = p ? CQM_8PC : CQM_8PY;
pixel *p_fenc = h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE;
pixel *p_fdec = h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE;
ALIGNED_ARRAY_32( dctcoef, dct8x8,[64] );
h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
int nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, ctx_cat_plane[DCT_LUMA_8x8][p], 0, p, i8 );
if( nnz8x8 )
{
h->zigzagf.scan_8x8( h->dct.luma8x8[4*p+i8], dct8x8 );
if( b_decimate && !h->mb.b_trellis )
nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[4*p+i8] );
if( nnz8x8 )
{
h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[quant_cat], i_qp );
h->dctf.add8x8_idct8( p_fdec, dct8x8 );
STORE_8x8_NNZ( p, i8, 1 );
h->mb.i_cbp_luma |= 1 << i8;
}
else
STORE_8x8_NNZ( p, i8, 0 );
}
else
STORE_8x8_NNZ( p, i8, 0 );
}
}
else
{
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
{
int quant_cat = p ? CQM_4PC : CQM_4PY;
pixel *p_fenc = h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE;
pixel *p_fdec = h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE;
int i_decimate_8x8 = b_decimate ? 0 : 4;
ALIGNED_ARRAY_32( dctcoef, dct4x4,[4],[16] );
int nnz8x8 = 0;
h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
STORE_8x8_NNZ( p, i8, 0 );
if( h->mb.b_noise_reduction )
for( int idx = 0; idx < 4; idx++ )
h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0+!!p*2], h->nr_offset[0+!!p*2], 16 );
if( h->mb.b_trellis )
{
for( int i4x4 = 0; i4x4 < 4; i4x4++ )
{
if( x264_quant_4x4_trellis( h, dct4x4[i4x4], quant_cat, i_qp, ctx_cat_plane[DCT_LUMA_4x4][p], 0, !!p, i8*4+i4x4+p*16 ) )
{
h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+i8*4+i4x4], dct4x4[i4x4] );
h->quantf.dequant_4x4( dct4x4[i4x4], h->dequant4_mf[quant_cat], i_qp );
if( i_decimate_8x8 < 4 )
i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[p*16+i8*4+i4x4] );
h->mb.cache.non_zero_count[x264_scan8[p*16+i8*4+i4x4]] = 1;
nnz8x8 = 1;
}
}
}
else
{
nnz8x8 = nz = h->quantf.quant_4x4x4( dct4x4, h->quant4_mf[quant_cat][i_qp], h->quant4_bias[quant_cat][i_qp] );
if( nz )
{
FOREACH_BIT( i4x4, 0, nz )
{
h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+i8*4+i4x4], dct4x4[i4x4] );
h->quantf.dequant_4x4( dct4x4[i4x4], h->dequant4_mf[quant_cat], i_qp );
if( i_decimate_8x8 < 4 )
i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[p*16+i8*4+i4x4] );
h->mb.cache.non_zero_count[x264_scan8[p*16+i8*4+i4x4]] = 1;
}
}
}
if( nnz8x8 )
{
/* decimate this 8x8 block */
if( i_decimate_8x8 < 4 )
STORE_8x8_NNZ( p, i8, 0 );
else
{
h->dctf.add8x8_idct( p_fdec, dct4x4 );
h->mb.i_cbp_luma |= 1 << i8;
}
}
}
}
if( chroma == CHROMA_420 || chroma == CHROMA_422 )
{
i_qp = h->mb.i_chroma_qp;
for( int ch = 0; ch < 2; ch++ )
{
ALIGNED_ARRAY_32( dctcoef, dct4x4,[2],[16] );
pixel *p_fenc = h->mb.pic.p_fenc[1+ch] + 4*x + (chroma422?8:4)*y*FENC_STRIDE;
pixel *p_fdec = h->mb.pic.p_fdec[1+ch] + 4*x + (chroma422?8:4)*y*FDEC_STRIDE;
for( int i4x4 = 0; i4x4 <= chroma422; i4x4++ )
{
h->dctf.sub4x4_dct( dct4x4[i4x4], p_fenc + 4*i4x4*FENC_STRIDE, p_fdec + 4*i4x4*FDEC_STRIDE );
if( h->mb.b_noise_reduction )
h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[2], h->nr_offset[2], 16 );
dct4x4[i4x4][0] = 0;
if( h->mb.b_trellis )
nz = x264_quant_4x4_trellis( h, dct4x4[i4x4], CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
else
nz = h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );
int offset = chroma422 ? ((5*i8) & 0x09) + 2*i4x4 : i8;
h->mb.cache.non_zero_count[x264_scan8[16+offset+ch*16]] = nz;
if( nz )
{
h->zigzagf.scan_4x4( h->dct.luma4x4[16+offset+ch*16], dct4x4[i4x4] );
h->quantf.dequant_4x4( dct4x4[i4x4], h->dequant4_mf[CQM_4PC], i_qp );
h->dctf.add4x4_idct( p_fdec + 4*i4x4*FDEC_STRIDE, dct4x4[i4x4] );
}
}
}
h->mb.i_cbp_chroma = 0x02;
}
}
}
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
if( CHROMA444 )
x264_macroblock_encode_p8x8_internal( h, i8, 3, CHROMA_444 );
else if( CHROMA_FORMAT == CHROMA_422 )
x264_macroblock_encode_p8x8_internal( h, i8, 1, CHROMA_422 );
else
x264_macroblock_encode_p8x8_internal( h, i8, 1, CHROMA_420 );
}
/*****************************************************************************
* RD only, luma only (for 4:2:0)
*****************************************************************************/
static ALWAYS_INLINE void x264_macroblock_encode_p4x4_internal( x264_t *h, int i4, int plane_count )
{
int i_qp = h->mb.i_qp;
for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
{
int quant_cat = p ? CQM_4PC : CQM_4PY;
pixel *p_fenc = &h->mb.pic.p_fenc[p][block_idx_xy_fenc[i4]];
pixel *p_fdec = &h->mb.pic.p_fdec[p][block_idx_xy_fdec[i4]];
int nz;
/* Don't need motion compensation as this function is only used in qpel-RD, which caches pixel data. */
if( h->mb.b_lossless )
{
nz = h->zigzagf.sub_4x4( h->dct.luma4x4[p*16+i4], p_fenc, p_fdec );
h->mb.cache.non_zero_count[x264_scan8[p*16+i4]] = nz;
}
else
{
ALIGNED_ARRAY_32( dctcoef, dct4x4,[16] );
h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
nz = x264_quant_4x4( h, dct4x4, i_qp, ctx_cat_plane[DCT_LUMA_4x4][p], 0, p, i4 );
h->mb.cache.non_zero_count[x264_scan8[p*16+i4]] = nz;
if( nz )
{
h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+i4], dct4x4 );
h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[quant_cat], i_qp );
h->dctf.add4x4_idct( p_fdec, dct4x4 );
}
}
}
}
void x264_macroblock_encode_p4x4( x264_t *h, int i8 )
{
if( CHROMA444 )
x264_macroblock_encode_p4x4_internal( h, i8, 3 );
else
x264_macroblock_encode_p4x4_internal( h, i8, 1 );
}
| kodabb/x264 | encoder/macroblock.c | C | gpl-2.0 | 57,854 |
/*
* SocialLedge.com - Copyright (C) 2013
*
* This file is part of free software framework for embedded processors.
* You can use it and/or distribute it as long as this copyright header
* remains unmodified. The code is free for personal use and requires
* permission to use in a commercial product.
*
* THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
* OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
* I SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
* CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
*
* You can reach the author of this software at :
* p r e e t . w i k i @ g m a i l . c o m
*/
#include <string.h>
#include "FreeRTOS.h"
#include "queue.h"
#include "task.h"
#include "can.h"
#include "LPC17xx.h"
#include "sys_config.h"
#include "lpc_sys.h" // sys_get_uptime_ms()
/**
 * If non-zero, test code is enabled, and each message sent is looped back to the receiver (self-reception).
* You need to either connect a CAN transceiver, or connect RD/TD wires of
* the board with a 1K resistor for the tests to work.
*
 * Note that FullCAN and the CAN filter are not tested together, but they both work individually.
*/
#define CAN_TESTING 0
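/* Illustrative note (not part of the original driver): to run the self-test,
 * change CAN_TESTING above to 1 and call CAN_test() from a task once the scheduler
 * is running; each frame is then looped back to the receiver via self-reception.
 */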
/// CAN index: enum to struct index conversion
#define CAN_INDEX(can) (can)
#define CAN_STRUCT_PTR(can) (&(g_can_structs[CAN_INDEX(can)]))
#define CAN_VALID(x) (can1 == x || can2 == x)
// Used by CAN_CT_ASSERT(). Obtained from http://www.pixelbeat.org/programming/gcc/static_assert.html
#define CAN_ASSERT_CONCAT_(a, b) a##b
#define CAN_ASSERT_CONCAT(a, b) CAN_ASSERT_CONCAT_(a, b)
#define CAN_CT_ASSERT(e) enum { CAN_ASSERT_CONCAT(assert_line_, __LINE__) = 1/(!!(e)) }
// Make some compile-time (CT) checks :
// Check the sizes of the structs because the size needs to match the HW registers
CAN_CT_ASSERT( 2 == sizeof(can_std_id_t));
CAN_CT_ASSERT( 4 == sizeof(can_ext_id_t));
CAN_CT_ASSERT( 8 == sizeof(can_data_t));
CAN_CT_ASSERT(16 == sizeof(can_msg_t));
CAN_CT_ASSERT(12 == sizeof(can_fullcan_msg_t));
/// Interrupt masks of the CANxIER and CANxICR registers
typedef enum {
intr_rx = (1 << 0), ///< Receive
intr_tx1 = (1 << 1), ///< Transmit 1
intr_warn = (1 << 2), ///< Warning (if error BUS status changes)
intr_ovrn = (1 << 3), ///< Data overrun
intr_wkup = (1 << 4), ///< Wake-up
intr_epi = (1 << 5), ///< Change from error active to error passive or vice versa
intr_ali = (1 << 6), ///< Arbitration lost
intr_berr = (1 << 7), ///< Bus error (happens during each error/retry of a message)
intr_idi = (1 << 8), ///< ID ready (a message was transmitted or aborted)
intr_tx2 = (1 << 9), ///< Transmit 2
intr_tx3 = (1 << 10), ///< Transmit 3
intr_all_tx = (intr_tx1 | intr_tx2 | intr_tx3), ///< Mask of the 3 transmit buffers
} can_intr_t;
/// Bit mask of SR register indicating which hardware buffer is available
enum {
tx1_avail = (1 << 2), ///< Transmit buffer 1 is available
tx2_avail = (1 << 10), ///< Transmit buffer 2 is available
tx3_avail = (1 << 18), ///< Transmit buffer 3 is available
tx_all_avail = (tx1_avail | tx2_avail | tx3_avail),
};
/**
* Data values of the AFMR register
* @note Since AFMR is common to both controllers, when bypass mode is enabled,
* then ALL messages from ALL CAN controllers will be accepted
*
 *   Bit1: Bypass   Bit0: ACC Off
 *        0               1         No messages accepted
 *        1               X         All messages accepted
 *        0               0         HW Filter or FullCAN
*/
enum {
afmr_enabled = 0x00, ///< Hardware acceptance filtering
afmr_disabled = 0x01, ///< No messages will be accepted
afmr_bypass = 0x02, ///< Bypass mode, all messages will be accepted. Both 0x02 or 0x03 will work.
afmr_fullcan = 0x04, ///< Hardware will receive and store messages per FullCAN mode.
};
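/* For illustration (not from the original source): during bring-up one would
 * typically accept every frame by writing the bypass value
 * (LPC_CANAF->AFMR = afmr_bypass;), which is what
 * CAN_bypass_filter_accept_all_msgs() below does.
 */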
/// CAN MOD register values
enum {
can_mod_normal = 0x00, ///< CAN MOD register value to enable the BUS
can_mod_reset = 0x01, ///< CAN MOD register value to reset the BUS
can_mod_normal_tpm = (can_mod_normal | (1 << 3)), ///< CAN bus enabled with TPM mode bits set
can_mod_selftest = (1 << 2) | can_mod_normal, ///< Used to enable global self-test
};
/// Mask of the PCONP register
enum {
can1_pconp_mask = (1 << 13), ///< CAN1 power on bitmask
can2_pconp_mask = (1 << 14), ///< CAN2 power on bitmask
};
/// Typedef of CAN queues and data
typedef struct {
LPC_CAN_TypeDef *pCanRegs; ///< The pointer to the CAN registers
    QueueHandle_t rxQ;           ///< RX queue
    QueueHandle_t txQ;           ///< TX queue
uint16_t droppedRxMsgs; ///< Number of messages dropped if no space found during the CAN interrupt that queues the RX messages
uint16_t rxQWatermark; ///< Watermark of the FreeRTOS Rx Queue
uint16_t txQWatermark; ///< Watermark of the FreeRTOS Tx Queue
uint16_t txMsgCount; ///< Number of messages sent
uint16_t rxMsgCount; ///< Number of received messages
can_void_func_t bus_error; ///< When serious BUS error occurs
can_void_func_t data_overrun; ///< When we read the CAN buffer too late for incoming message
} can_struct_t ;
/// Structure of both CANs
can_struct_t g_can_structs[can_max] = { {LPC_CAN1}, {LPC_CAN2}};
/**
* This type of CAN interrupt should lead to "bus error", but note that intr_berr is not the right
* one as that one is triggered upon each type of CAN error which may be a simple "retry" that
* can be recovered. intr_epi or intr_warn should work for this selection.
*/
static const can_intr_t g_can_bus_err_intr = intr_epi;
/** @{ Private functions */
/**
* Sends a message using an available buffer. Initially this chose one out of the three buffers but that's
* a little tricky to use when messages are always queued since one of the 3 buffers can be starved and not
 * get sent. Therefore some of that logic is #ifdef'd out to only use one HW buffer.
*
* @returns true if the message was written to the HW buffer to be sent, otherwise false if the HW buffer(s) are busy.
*
* Notes:
* - Using the TX message counter and the TPM bit, we can ensure that the HW chooses between the TX1/TX2/TX3
 *    in a round-robin fashion; otherwise there is a possibility that, if the CAN Tx queue is always full,
* a low message ID can be starved even if it was amongst the first ones written using this method call.
*
* @warning This should be called from critical section since this method is not thread-safe
*/
static bool CAN_tx_now (can_struct_t *struct_ptr, can_msg_t *msg_ptr)
{
// 32-bit command of CMR register to start transmission of one of the buffers
enum {
go_cmd_invalid = 0,
go_cmd_tx1 = 0x21,
go_cmd_tx2 = 0x41,
go_cmd_tx3 = 0x81,
};
LPC_CAN_TypeDef *pCAN = struct_ptr->pCanRegs;
const uint32_t can_sr_reg = pCAN->SR;
volatile can_msg_t *pHwMsgRegs = NULL;
uint32_t go_cmd = go_cmd_invalid;
if (can_sr_reg & tx1_avail){
pHwMsgRegs = (can_msg_t*)&(pCAN->TFI1);
go_cmd = go_cmd_tx1;
}
#if 0
else if (can_sr_reg & tx2_avail){
pHwMsgRegs = (can_msg_t*)&(pCAN->TFI2);
go_cmd = go_cmd_tx2;
}
else if (can_sr_reg & tx3_avail){
pHwMsgRegs = (can_msg_t*)&(pCAN->TFI3);
go_cmd = go_cmd_tx3;
}
#endif
else {
/* No buffer available, return failure */
return false;
}
/* Copy the CAN message to the HW CAN registers and write the 8 TPM bits.
* We set TPM bits each time by using the txMsgCount because otherwise if TX1, and TX2 are always
* being written with a lower message ID, then TX3 will starve and never be sent.
*/
#if 0
    // A higher number is sent later, but rollover from 255 back to 0 is a problem: the newly
    // written 0 would be sent first, and a buffer holding a higher TPM value could starve.
const uint8_t tpm = struct_ptr->txMsgCount;
msg_ptr->frame |= tpm;
#endif
*pHwMsgRegs = *msg_ptr;
struct_ptr->txMsgCount++;
#if CAN_TESTING
go_cmd &= (0xF0);
    go_cmd |= (1 << 4); /* Self reception request, keep the selected buffer bit */
#endif
/* Send the message! */
pCAN->CMR = go_cmd;
return true;
}
static void CAN_handle_isr(const can_t can)
{
can_struct_t *pStruct = CAN_STRUCT_PTR(can);
LPC_CAN_TypeDef *pCAN = pStruct->pCanRegs;
const uint32_t rbs = (1 << 0);
const uint32_t ibits = pCAN->ICR;
UBaseType_t count;
can_msg_t msg;
/* Handle the received message */
if ((ibits & intr_rx) | (pCAN->GSR & rbs)) {
if( (count = uxQueueMessagesWaitingFromISR(pStruct->rxQ)) > pStruct->rxQWatermark) {
pStruct->rxQWatermark = count;
}
can_msg_t *pHwMsgRegs = (can_msg_t*) &(pCAN->RFS);
if (xQueueSendFromISR(pStruct->rxQ, pHwMsgRegs, NULL)) {
pStruct->rxMsgCount++;
}
else {
pStruct->droppedRxMsgs++;
}
pCAN->CMR = 0x04; // Release the receive buffer, no need to bitmask
}
/* A transmit finished, send any queued message(s) */
if (ibits & intr_all_tx) {
if( (count = uxQueueMessagesWaitingFromISR(pStruct->txQ)) > pStruct->txQWatermark) {
pStruct->txQWatermark = count;
}
if (xQueueReceiveFromISR(pStruct->txQ, &msg, NULL)) {
CAN_tx_now(pStruct, &msg);
}
}
/* We only enable interrupt when a valid callback exists, so no need
* to check for the callback function being NULL
*/
if (ibits & g_can_bus_err_intr) {
pStruct->bus_error(ibits);
}
if (ibits & intr_ovrn) {
pStruct->data_overrun(ibits);
}
}
/** @} */
/**
* Actual ISR Handler (mapped to startup file's interrupt vector function name)
* This interrupt is shared between CAN1, and CAN2
*/
#ifdef __cplusplus
extern "C" {
#endif
void CAN_IRQHandler(void)
{
const uint32_t pconp = LPC_SC->PCONP;
/* Reading registers without CAN powered up will cause DABORT exception */
if (pconp & can1_pconp_mask) {
CAN_handle_isr(can1);
}
if (pconp & can2_pconp_mask) {
CAN_handle_isr(can2);
}
}
#ifdef __cplusplus
}
#endif
bool CAN_init(can_t can, uint32_t baudrate_kbps, uint16_t rxq_size, uint16_t txq_size,
can_void_func_t bus_off_cb, can_void_func_t data_ovr_cb)
{
if (!CAN_VALID(can)){
return false;
}
can_struct_t *pStruct = CAN_STRUCT_PTR(can);
LPC_CAN_TypeDef *pCAN = pStruct->pCanRegs;
bool failed = true;
/* Enable CAN Power, and select the PINS
* CAN1 is at P0.0, P0.1 and P0.21, P0.22
* CAN2 is at P0.4, P0.5 and P2.7, P2.8
* On SJ-One board, we have P0.0, P0.1, and P2.7, P2.8
*/
if (can1 == can) {
LPC_SC->PCONP |= can1_pconp_mask;
LPC_PINCON->PINSEL0 &= ~(0xF << 0);
LPC_PINCON->PINSEL0 |= (0x5 << 0);
}
else if (can2 == can){
LPC_SC->PCONP |= can2_pconp_mask;
LPC_PINCON->PINSEL4 &= ~(0xF << 14);
LPC_PINCON->PINSEL4 |= (0x5 << 14);
}
/* Create the queues with minimum size of 1 to avoid NULL pointer reference */
if (!pStruct->rxQ) {
pStruct->rxQ = xQueueCreate(rxq_size ? rxq_size : 1, sizeof(can_msg_t));
}
if (!pStruct->txQ) {
pStruct->txQ = xQueueCreate(txq_size ? txq_size : 1, sizeof(can_msg_t));
}
/* The CAN dividers must all be the same for both CANs
* Set the dividers of CAN1, CAN2, ACF to CLK / 1
*/
lpc_pclk(pclk_can1, clkdiv_1);
lpc_pclk(pclk_can2, clkdiv_1);
lpc_pclk(pclk_can_flt, clkdiv_1);
pCAN->MOD = can_mod_reset;
pCAN->IER = 0x0; // Disable All CAN Interrupts
pCAN->GSR = 0x0; // Clear error counters
pCAN->CMR = 0xE; // Abort Tx, release Rx, clear data over-run
/**
* About the AFMR register :
     * Filter Mode     AccOff bit (B0)   AccBP bit (B1)   CAN Rx interrupt
     * Off Mode               1                 0         No messages accepted
     * Bypass Mode            X                 1         All messages accepted
     * FullCAN                0                 0         HW acceptance filtering
*/
LPC_CANAF->AFMR = afmr_disabled;
// Clear pending interrupts and the CAN Filter RAM
LPC_CANAF_RAM->mask[0] = pCAN->ICR;
memset((void*)&(LPC_CANAF_RAM->mask[0]), 0, sizeof(LPC_CANAF_RAM->mask));
/* Zero out the filtering registers */
LPC_CANAF->SFF_sa = 0;
LPC_CANAF->SFF_GRP_sa = 0;
LPC_CANAF->EFF_sa = 0;
LPC_CANAF->EFF_GRP_sa = 0;
LPC_CANAF->ENDofTable = 0;
/* Do not accept any messages until CAN filtering is enabled */
LPC_CANAF->AFMR = afmr_disabled;
/* Set the baud-rate. You can verify the settings by visiting:
* http://www.kvaser.com/en/support/bit-timing-calculator.html
*/
do {
const uint32_t baudDiv = sys_get_cpu_clock() / (1000 * baudrate_kbps);
const uint32_t SJW = 3;
const uint32_t SAM = 0;
uint32_t BRP = 0, TSEG1 = 0, TSEG2 = 0, NT = 0;
/* Calculate suitable nominal time value
* NT (nominal time) = (TSEG1 + TSEG2 + 3)
* NT <= 24
* TSEG1 >= 2*TSEG2
*/
failed = true;
for(NT=24; NT > 0; NT-=2) {
if ((baudDiv % NT)==0) {
BRP = baudDiv / NT - 1;
NT--;
TSEG2 = (NT/3) - 1;
TSEG1 = NT -(NT/3) - 1;
failed = false;
break;
}
}
if (!failed) {
pCAN->BTR = (SAM << 23) | (TSEG2<<20) | (TSEG1<<16) | (SJW<<14) | BRP;
// CANx->BTR = 0x002B001D; // 48Mhz 100Khz
}
} while (0);
/* If everything okay so far, enable the CAN interrupts */
if (!failed) {
/* At minimum, we need Rx/Tx interrupts */
pCAN->IER = (intr_rx | intr_all_tx);
/* Enable BUS-off interrupt and callback if given */
if (bus_off_cb) {
pStruct->bus_error = bus_off_cb;
pCAN->IER |= g_can_bus_err_intr;
}
/* Enable data-overrun interrupt and callback if given */
if (data_ovr_cb) {
pStruct->data_overrun = data_ovr_cb;
pCAN->IER |= intr_ovrn;
}
/* Finally, enable the actual CPU core interrupt */
vTraceSetISRProperties(CAN_IRQn, "CAN", IP_can);
NVIC_EnableIRQ(CAN_IRQn);
}
/* return true if all is well */
return (false == failed);
}
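/* Illustrative usage sketch (not part of the original driver). It assumes a CAN
 * transceiver (or the RD/TD loopback described at the top of this file) and a
 * 100kbps bus; the queue depths and message contents are arbitrary examples.
 *
 *   CAN_init(can1, 100, 16, 16, NULL, NULL);   // no error callbacks
 *   CAN_bypass_filter_accept_all_msgs();       // or call CAN_setup_filter()
 *   CAN_reset_bus(can1);                       // take the controller out of reset
 *
 *   can_msg_t msg;
 *   memset(&msg, 0, sizeof(msg));
 *   msg.msg_id = 0x123;
 *   msg.frame_fields.data_len = 8;
 *   msg.data.qword = 0x1122334455667788;
 *   CAN_tx(can1, &msg, 0);
 *
 *   can_msg_t rx;
 *   if (CAN_rx(can1, &rx, 100)) {
 *       // process rx.msg_id / rx.data
 *   }
 */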
bool CAN_tx (can_t can, can_msg_t *pCanMsg, uint32_t timeout_ms)
{
if (!CAN_VALID(can) || !pCanMsg || CAN_is_bus_off(can)) {
return false;
}
bool ok = false;
can_struct_t *pStruct = CAN_STRUCT_PTR(can);
LPC_CAN_TypeDef *CANx = pStruct->pCanRegs;
/* Try transmitting to one of the available buffers */
taskENTER_CRITICAL();
do {
ok = CAN_tx_now(pStruct, pCanMsg);
} while(0);
taskEXIT_CRITICAL();
/* If HW buffer not available, then just queue the message */
if (!ok) {
if (taskSCHEDULER_RUNNING == xTaskGetSchedulerState()) {
ok = xQueueSend(pStruct->txQ, pCanMsg, OS_MS(timeout_ms));
}
else {
ok = xQueueSend(pStruct->txQ, pCanMsg, 0);
}
        /* There is a possibility that before we queued the message, we got interrupted
* and all hw buffers were emptied meanwhile, and our queued message will now
* sit in the queue forever until another Tx interrupt takes place.
* So we dequeue it here if all are empty and send it over.
*/
taskENTER_CRITICAL();
do {
can_msg_t msg;
if (tx_all_avail == (CANx->SR & tx_all_avail) &&
xQueueReceive(pStruct->txQ, &msg, 0)
) {
ok = CAN_tx_now(pStruct, &msg);
}
} while(0);
taskEXIT_CRITICAL();
}
return ok;
}
bool CAN_rx (can_t can, can_msg_t *pCanMsg, uint32_t timeout_ms)
{
bool ok = false;
if (CAN_VALID(can) && pCanMsg)
{
if (taskSCHEDULER_RUNNING == xTaskGetSchedulerState()) {
ok = xQueueReceive(CAN_STRUCT_PTR(can)->rxQ, pCanMsg, OS_MS(timeout_ms));
}
else {
uint64_t msg_timeout = sys_get_uptime_ms() + timeout_ms;
while (! (ok = xQueueReceive(CAN_STRUCT_PTR(can)->rxQ, pCanMsg, 0))) {
if (sys_get_uptime_ms() > msg_timeout) {
break;
}
}
}
}
return ok;
}
bool CAN_is_bus_off(can_t can)
{
const uint32_t bus_off_mask = (1 << 7);
return (!CAN_VALID(can)) ? true : !! (CAN_STRUCT_PTR(can)->pCanRegs->GSR & bus_off_mask);
}
void CAN_reset_bus(can_t can)
{
if (CAN_VALID(can)) {
CAN_STRUCT_PTR(can)->pCanRegs->MOD = can_mod_reset;
#if CAN_TESTING
CAN_STRUCT_PTR(can)->pCanRegs->MOD = can_mod_selftest;
#else
CAN_STRUCT_PTR(can)->pCanRegs->MOD = can_mod_normal_tpm;
#endif
}
}
uint16_t CAN_get_rx_watermark(can_t can)
{
return CAN_VALID(can) ? CAN_STRUCT_PTR(can)->rxQWatermark : 0;
}
uint16_t CAN_get_tx_watermark(can_t can)
{
return CAN_VALID(can) ? CAN_STRUCT_PTR(can)->txQWatermark : 0;
}
uint16_t CAN_get_tx_count(can_t can)
{
return CAN_VALID(can) ? CAN_STRUCT_PTR(can)->txMsgCount : 0;
}
uint16_t CAN_get_rx_count(can_t can)
{
return CAN_VALID(can) ? CAN_STRUCT_PTR(can)->rxMsgCount : 0;
}
uint16_t CAN_get_rx_dropped_count(can_t can)
{
return CAN_VALID(can) ? CAN_STRUCT_PTR(can)->droppedRxMsgs : 0;
}
void CAN_bypass_filter_accept_all_msgs(void)
{
LPC_CANAF->AFMR = afmr_bypass;
}
can_std_id_t CAN_gen_sid(can_t can, uint16_t id)
{
/* SCC in datasheet is defined as can controller - 1 */
const uint16_t scc = (can);
can_std_id_t ret;
ret.can_num = scc;
ret.disable = (0xffff == id) ? 1 : 0;
ret.fc_intr = 0;
ret.id = id;
return ret;
}
can_ext_id_t CAN_gen_eid(can_t can, uint32_t id)
{
/* SCC in datasheet is defined as can controller - 1 */
const uint16_t scc = (can);
can_ext_id_t ret;
ret.can_num = scc;
ret.id = id;
return ret;
}
bool CAN_fullcan_add_entry(can_t can, can_std_id_t id1, can_std_id_t id2)
{
/* Return if invalid CAN given */
if (!CAN_VALID(can)) {
return false;
}
/* Check for enough room for more FullCAN entries
     * Each entry takes a 2-byte ID slot and 12 bytes of message RAM.
*/
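    /* Worked example (illustrative): each call to CAN_fullcan_add_entry() adds two
     * entries, i.e. 2 * (2 + 12) = 28 bytes of acceptance-filter RAM, so the size of
     * LPC_CANAF_RAM->mask bounds how many FullCAN IDs can be registered.
     */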
const uint16_t existing_entries = CAN_fullcan_get_num_entries();
const uint16_t size_per_entry = sizeof(can_std_id_t) + sizeof(can_fullcan_msg_t);
if ((existing_entries * size_per_entry) >= sizeof(LPC_CANAF_RAM->mask)) {
return false;
}
/* Locate where we should write the next entry */
uint8_t *base = (uint8_t*) &(LPC_CANAF_RAM->mask[0]);
uint8_t *next_entry_ptr = base + LPC_CANAF->SFF_sa;
/* Copy the new entry into the RAM filter */
LPC_CANAF->AFMR = afmr_disabled;
do {
const uint32_t entries = ((uint32_t) id2.raw & UINT16_MAX) | ((uint32_t) id1.raw << 16);
* (uint32_t*) (next_entry_ptr) = entries;
/* The new start of Standard Frame Filter is after the two new entries */
const uint32_t new_sff_sa = LPC_CANAF->SFF_sa + sizeof(id1) + sizeof(id2);
LPC_CANAF->SFF_sa = new_sff_sa;
/* Next filters start at SFF_sa (they are disabled) */
LPC_CANAF->SFF_GRP_sa = new_sff_sa;
LPC_CANAF->EFF_sa = new_sff_sa;
LPC_CANAF->EFF_GRP_sa = new_sff_sa;
LPC_CANAF->ENDofTable = new_sff_sa;
} while(0);
LPC_CANAF->AFMR = afmr_fullcan;
return true;
}
can_fullcan_msg_t* CAN_fullcan_get_entry_ptr(can_std_id_t fc_id)
{
/* Number of entries depends on how far SFF_sa is from base of 0 */
const uint16_t num_entries = CAN_fullcan_get_num_entries();
uint16_t idx = 0;
/* The FullCAN entries are at the base of the CAN RAM */
const can_std_id_t *id_list = (can_std_id_t*) &(LPC_CANAF_RAM->mask[0]);
/* Find the standard ID entered into the RAM
* Once we find the ID, its message's RAM location is after
* LPC_CANAF->ENDofTable based on the index location.
*
* Note that due to MSB/LSB of the CAN RAM, we check in terms of 16-bit WORDS
* and LSB word match means we will find it at index + 1, and MSB word match
* means we will find it at the index.
*/
for (idx = 0; idx < num_entries; idx+=2) {
if (id_list[idx].id == fc_id.id) {
++idx;
break;
}
if (id_list[idx+1].id == fc_id.id) {
break;
}
}
can_fullcan_msg_t *real_entry = NULL;
if (idx < num_entries) {
/* If we find an index, we have to convert it to the actual message pointer */
can_fullcan_msg_t *base_msg_entry = (can_fullcan_msg_t*)
(((uint8_t*) &(LPC_CANAF_RAM->mask[0])) + LPC_CANAF->ENDofTable);
real_entry = (base_msg_entry + idx);
}
return real_entry;
}
bool CAN_fullcan_read_msg_copy(can_fullcan_msg_t *pMsg, can_fullcan_msg_t *pMsgCopy)
{
const uint8_t *can_ram_base = (uint8_t*) &(LPC_CANAF_RAM->mask[0]);
const uint8_t *start = can_ram_base + LPC_CANAF->ENDofTable; // Actual FullCAN msgs are stored after this
const uint8_t *end = can_ram_base + sizeof(LPC_CANAF_RAM->mask); // Last byte of CAN RAM + 1
bool new_msg_received = false;
/* Validate the input pointers. pMsg must be within range of our RAM filter
     * where the actual FullCAN message should be stored
*/
const uint8_t *ptr = (uint8_t*) pMsg;
if (ptr < start || ptr >= end || !pMsgCopy) {
return false;
}
/* If semaphore bits change, then HW has updated the message so read it again.
* After HW writes new message, semaphore bits are changed to 0b11.
*/
while (0 != pMsg->semphr) {
new_msg_received = true;
pMsg->semphr = 0;
*pMsgCopy = *pMsg;
}
return new_msg_received;
}
uint8_t CAN_fullcan_get_num_entries(void)
{
return LPC_CANAF->SFF_sa / sizeof(can_std_id_t);
}
bool CAN_setup_filter(const can_std_id_t *std_id_list, uint16_t sid_cnt,
const can_std_grp_id_t *std_group_id_list, uint16_t sgp_cnt,
const can_ext_id_t *ext_id_list, uint16_t eid_cnt,
const can_ext_grp_id_t *ext_group_id_list, uint16_t egp_cnt)
{
bool ok = true;
uint32_t i = 0;
uint32_t temp32 = 0;
// Count of standard IDs must be even
if (sid_cnt & 1) {
return false;
}
LPC_CANAF->AFMR = afmr_disabled;
do {
        /* Filter RAM is after the FullCAN entries */
uint32_t can_ram_base_addr = (uint32_t) &(LPC_CANAF_RAM->mask[0]);
/* FullCAN entries take up 2 bytes each at beginning RAM, and 12-byte sections at the end */
const uint32_t can_ram_end_addr = can_ram_base_addr + sizeof(LPC_CANAF_RAM->mask) -
( sizeof(can_fullcan_msg_t) * CAN_fullcan_get_num_entries());
/* Our filter RAM is after FullCAN entries */
uint32_t *ptr = (uint32_t*) (can_ram_base_addr + LPC_CANAF->SFF_sa);
/* macro to swap top and bottom 16-bits of 32-bit DWORD */
#define CAN_swap32(t32) do { \
t32 = (t32 >> 16) | (t32 << 16);\
} while (0)
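        /* e.g. CAN_swap32 turns 0x11223344 into 0x33441122 (upper and lower half-words exchanged) */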
/**
         * The standard ID list and group list need to be swapped, otherwise setting the wrong
         * filter will make the CAN ISR go into a loop for no apparent reason.
         * The filter data appears to be in Motorola (big-endian) format.
* See "configuration example 5" in CAN chapter.
*/
#define CAN_add_filter_list(list, ptr, end, cnt, entry_size, swap) \
do { if (NULL != list) { \
if ((uint32_t)ptr + (cnt * entry_size) < end) { \
for (i = 0; i < (cnt * entry_size)/4; i++) { \
if(swap) { \
temp32 = ((uint32_t*)list) [i]; \
CAN_swap32(temp32); \
ptr[i] = temp32; \
} \
else { \
ptr[i] = ((uint32_t*)list) [i]; \
} \
} \
ptr += (cnt * entry_size)/4; \
} else { ok = false; } } } while(0)
        /* The sa (start addresses) are byte address offsets from the CAN RAM base
* and must be 16-bit (WORD) aligned
* LPC_CANAF->SFF_sa should already be setup by FullCAN if used, or
* set to zero by the can init function.
*/
CAN_add_filter_list(std_id_list, ptr, can_ram_end_addr, sid_cnt, sizeof(can_std_id_t), true);
LPC_CANAF->SFF_GRP_sa = ((uint32_t)ptr - can_ram_base_addr);
CAN_add_filter_list(std_group_id_list, ptr, can_ram_end_addr, sgp_cnt, sizeof(can_std_grp_id_t), true);
LPC_CANAF->EFF_sa = ((uint32_t)ptr - can_ram_base_addr);
CAN_add_filter_list(ext_id_list, ptr, can_ram_end_addr, eid_cnt, sizeof(can_ext_id_t), false);
LPC_CANAF->EFF_GRP_sa = ((uint32_t)ptr - can_ram_base_addr);
CAN_add_filter_list(ext_group_id_list, ptr, can_ram_end_addr, egp_cnt, sizeof(can_ext_grp_id_t), false);
/* End of table is where the FullCAN messages are stored */
LPC_CANAF->ENDofTable = ((uint32_t)ptr - can_ram_base_addr);
} while(0);
/* If there was no FullCAN entry, then SFF_sa will be zero.
     * If it was zero, we just enable the AFMR, but if it was not zero, that means a
     * FullCAN entry was added, so we restore AFMR to FullCAN mode
*/
LPC_CANAF->AFMR = (0 == LPC_CANAF->SFF_sa) ? afmr_enabled : afmr_fullcan;
return ok;
}
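/* Illustrative call (mirrors the CAN_TESTING code below): the standard ID count must
 * be even, and unused categories can be passed as NULL with a count of 0, e.g.
 *
 *   CAN_setup_filter(slist, 4, sglist, 2, elist, 2, eglist, 1);
 */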
#if CAN_TESTING
#include <printf_lib.h>
#define CAN_ASSERT(x) if (!(x)) { u0_dbg_printf("Failed at %i, BUS: %s MOD: 0x%08x, GSR: 0x%08x\n"\
"IER/ICR: 0x%08X/0x%08x BTR: 0x%08x"\
"\nLine %i: %s\n", __LINE__, \
CAN_is_bus_off(can1) ? "OFF" : "ON", \
(int)LPC_CAN1->MOD, (int)LPC_CAN1->GSR, \
(int)LPC_CAN1->IER, (int)LPC_CAN1->ICR, \
(int)LPC_CAN1->BTR, \
__LINE__, #x); return false; }
void CAN_test_bufoff_cb(uint32_t d)
{
u0_dbg_printf("CB: BUS OFF\n");
}
void CAN_test_bufovr_cb(uint32_t d)
{
u0_dbg_printf("CB: DATA OVR\n");
}
bool CAN_test(void)
{
uint32_t i = 0;
#define can_test_msg(msg, id, rxtrue) do { \
u0_dbg_printf("Send ID: 0x%08X\n", id); \
msg.msg_id = id; \
CAN_ASSERT(CAN_tx(can1, &msg, 0)); \
msg.msg_id = 0; \
CAN_ASSERT(rxtrue == CAN_rx(can1, &msg, 10)); \
if (rxtrue) CAN_ASSERT(id == msg.msg_id); \
} while(0)
u0_dbg_printf(" Test init()\n");
CAN_ASSERT(!CAN_init(can_max, 100, 0, 0, NULL, NULL));
CAN_ASSERT(CAN_init(can1, 100, 5, 5, CAN_test_bufoff_cb, CAN_test_bufovr_cb));
CAN_ASSERT(LPC_CAN1->MOD == can_mod_reset);
CAN_bypass_filter_accept_all_msgs();
    CAN_ASSERT(CAN_STRUCT_PTR(can1)->rxQ != NULL);
    CAN_ASSERT(CAN_STRUCT_PTR(can1)->txQ != NULL);
CAN_ASSERT(LPC_CANAF->SFF_sa == 0);
CAN_ASSERT(LPC_CANAF->SFF_GRP_sa == 0);
CAN_ASSERT(LPC_CANAF->EFF_sa == 0);
CAN_ASSERT(LPC_CANAF->EFF_GRP_sa == 0);
CAN_ASSERT(LPC_CANAF->ENDofTable == 0);
CAN_reset_bus(can1);
CAN_ASSERT(LPC_CAN1->MOD == can_mod_selftest);
/* Create a message, and test tx with bad input */
uint32_t id = 0x100;
can_msg_t msg;
memset(&msg, 0, sizeof(msg));
msg.frame = 0;
msg.msg_id = id;
msg.frame_fields.is_29bit = 0;
msg.frame_fields.data_len = 8;
msg.data.qword = 0x1122334455667788;
CAN_ASSERT(!CAN_tx(can_max, &msg, 0)); // Invalid CAN
CAN_ASSERT(!CAN_rx(can1, NULL, 0)); // Invalid message pointer
/* Send msg and test receive */
u0_dbg_printf(" Test Tx/Rx\n");
can_test_msg(msg, 0x100, true);
can_test_msg(msg, 0x200, true);
can_test_msg(msg, 0x300, true);
can_test_msg(msg, 0x400, true);
can_test_msg(msg, 0x500, true);
const can_std_id_t slist[] = { CAN_gen_sid(can1, 0x100), CAN_gen_sid(can1, 0x110), // 2 entries
CAN_gen_sid(can1, 0x120), CAN_gen_sid(can1, 0x130) // 2 entries
};
const can_std_grp_id_t sglist[] = { {CAN_gen_sid(can1, 0x200), CAN_gen_sid(can1, 0x210)}, // Group 1
{CAN_gen_sid(can1, 0x220), CAN_gen_sid(can1, 0x230)} // Group 2
};
const can_ext_id_t elist[] = { CAN_gen_eid(can1, 0x7500), CAN_gen_eid(can1, 0x8500)};
const can_ext_grp_id_t eglist[] = { {CAN_gen_eid(can1, 0xA000), CAN_gen_eid(can1, 0xB000)} }; // Group 1
/* Test filter setup */
u0_dbg_printf(" Test filter setup\n");
CAN_setup_filter(slist, 4, sglist, 2, elist, 2, eglist, 1);
/* We use offset of zero if 2 FullCAN messages are added, otherwise 4 if none were added above */
const uint8_t offset = 4;
CAN_ASSERT(LPC_CANAF->SFF_sa == 4 - offset);
CAN_ASSERT(LPC_CANAF->SFF_GRP_sa == 12 - offset);
CAN_ASSERT(LPC_CANAF->EFF_sa == 20 - offset);
CAN_ASSERT(LPC_CANAF->EFF_GRP_sa == 28 - offset);
CAN_ASSERT(LPC_CANAF->ENDofTable == 36 - offset);
for ( i = 0; i < 10; i++) {
u0_dbg_printf("%2i: 0x%08X\n", i, (uint32_t) LPC_CANAF_RAM->mask[i]);
}
/* Send a message defined in filter */
u0_dbg_printf(" Test filter messages\n");
msg.frame = 0;
msg.frame_fields.is_29bit = 0;
msg.frame_fields.data_len = 8;
msg.data.qword = 0x1122334455667788;
/* Test reception of messages defined in the filter */
u0_dbg_printf(" Test message reception according to filter\n");
msg.frame_fields.is_29bit = 0;
can_test_msg(msg, 0x100, true); // standard id
can_test_msg(msg, 0x110, true); // standard id
can_test_msg(msg, 0x120, true); // standard id
can_test_msg(msg, 0x130, true); // standard id
can_test_msg(msg, 0x200, true); // Start of standard ID group
can_test_msg(msg, 0x210, true); // Last of standard ID group
can_test_msg(msg, 0x220, true); // Start of standard ID group
can_test_msg(msg, 0x230, true); // Last of standard ID group
msg.frame_fields.is_29bit = 1;
can_test_msg(msg, 0x7500, true); // extended id
can_test_msg(msg, 0x8500, true); // extended id
can_test_msg(msg, 0xA000, true); // extended id group start
can_test_msg(msg, 0xB000, true); // extended id group end
u0_dbg_printf(" Test messages that should not be received\n");
/* Send a message not defined in filter */
msg.frame_fields.is_29bit = 0;
can_test_msg(msg, 0x0FF, false);
can_test_msg(msg, 0x111, false);
can_test_msg(msg, 0x131, false);
can_test_msg(msg, 0x1FF, false);
can_test_msg(msg, 0x211, false);
can_test_msg(msg, 0x21f, false);
can_test_msg(msg, 0x231, false);
msg.frame_fields.is_29bit = 1;
can_test_msg(msg, 0x7501, false);
can_test_msg(msg, 0x8501, false);
can_test_msg(msg, 0xA000-1, false);
can_test_msg(msg, 0xB000+1, false);
/* Test FullCAN */
u0_dbg_printf(" Test FullCAN\n");
CAN_init(can1, 100, 5, 5, CAN_test_bufoff_cb, CAN_test_bufovr_cb);
CAN_reset_bus(can1);
id = 0x100;
CAN_ASSERT(0 == CAN_fullcan_get_num_entries());
CAN_ASSERT(CAN_fullcan_add_entry(can1, CAN_gen_sid(can1, id), CAN_gen_sid(can1, id+1)));
CAN_ASSERT(2 == CAN_fullcan_get_num_entries());
CAN_ASSERT(LPC_CANAF->SFF_sa == 4);
CAN_ASSERT(LPC_CANAF->SFF_GRP_sa == 4);
CAN_ASSERT(LPC_CANAF->EFF_sa == 4);
CAN_ASSERT(LPC_CANAF->EFF_GRP_sa == 4);
CAN_ASSERT(LPC_CANAF->ENDofTable == 4);
CAN_ASSERT(CAN_fullcan_add_entry(can1, CAN_gen_sid(can1, id+2), CAN_gen_sid(can1, id+3)));
CAN_ASSERT(4 == CAN_fullcan_get_num_entries());
CAN_ASSERT(LPC_CANAF->SFF_sa == 8);
for ( i = 0; i < 3; i++) {
u0_dbg_printf("%2i: 0x%08X\n", i, (uint32_t) LPC_CANAF_RAM->mask[i]);
}
can_fullcan_msg_t *fc1 = CAN_fullcan_get_entry_ptr(CAN_gen_sid(can1, id));
can_fullcan_msg_t *fc2 = CAN_fullcan_get_entry_ptr(CAN_gen_sid(can1, id+1));
can_fullcan_msg_t *fc3 = CAN_fullcan_get_entry_ptr(CAN_gen_sid(can1, id+2));
can_fullcan_msg_t *fc4 = CAN_fullcan_get_entry_ptr(CAN_gen_sid(can1, id+3));
CAN_ASSERT((LPC_CANAF_RAM_BASE + LPC_CANAF->SFF_sa) == (uint32_t)fc1);
CAN_ASSERT((LPC_CANAF_RAM_BASE + LPC_CANAF->SFF_sa + 1*sizeof(can_fullcan_msg_t)) == (uint32_t)fc2);
CAN_ASSERT((LPC_CANAF_RAM_BASE + LPC_CANAF->SFF_sa + 2*sizeof(can_fullcan_msg_t)) == (uint32_t)fc3);
CAN_ASSERT((LPC_CANAF_RAM_BASE + LPC_CANAF->SFF_sa + 3*sizeof(can_fullcan_msg_t)) == (uint32_t)fc4);
can_fullcan_msg_t fc_temp;
CAN_ASSERT(!CAN_fullcan_read_msg_copy(fc1, &fc_temp));
CAN_ASSERT(!CAN_fullcan_read_msg_copy(fc2, &fc_temp));
CAN_ASSERT(!CAN_fullcan_read_msg_copy(fc3, &fc_temp));
CAN_ASSERT(!CAN_fullcan_read_msg_copy(fc4, &fc_temp));
/* Send message, see if fullcan captures it */
msg.frame = 0;
msg.msg_id = id;
msg.frame_fields.is_29bit = 0;
msg.frame_fields.data_len = 8;
#define can_test_fullcan_msg(fc, msg_copy, id) \
do { \
msg.msg_id = id; \
CAN_ASSERT(CAN_tx(can1, &msg, 0)); \
CAN_ASSERT(!CAN_rx(can1, &msg, 10)); \
CAN_ASSERT(CAN_fullcan_read_msg_copy(fc, &msg_copy)); \
CAN_ASSERT(fc->msg_id == id) \
} while(0)
can_test_fullcan_msg(fc1, fc_temp, id+0); CAN_ASSERT(!CAN_fullcan_read_msg_copy(fc2, &fc_temp));
can_test_fullcan_msg(fc2, fc_temp, id+1); CAN_ASSERT(!CAN_fullcan_read_msg_copy(fc3, &fc_temp));
can_test_fullcan_msg(fc3, fc_temp, id+2); CAN_ASSERT(!CAN_fullcan_read_msg_copy(fc4, &fc_temp));
can_test_fullcan_msg(fc4, fc_temp, id+3); CAN_ASSERT(!CAN_fullcan_read_msg_copy(fc1, &fc_temp));
u0_dbg_printf(" \n--> All tests successful! <--\n");
return true;
}
#endif
| kammce/SJSU-DEV-Linux | firmware/default/lib/L2_Drivers/src/can.c | C | gpl-2.0 | 36,142 |
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* Copyright (c) 2008 Dave Chinner
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_log.h"
#ifdef DEBUG
/*
* Check that the list is sorted as it should be.
*/
STATIC void
xfs_ail_check(
struct xfs_ail *ailp,
xfs_log_item_t *lip)
{
xfs_log_item_t *prev_lip;
if (list_empty(&ailp->xa_ail))
return;
/*
* Check the next and previous entries are valid.
*/
ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
if (&prev_lip->li_ail != &ailp->xa_ail)
ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
if (&prev_lip->li_ail != &ailp->xa_ail)
ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */
/*
* Return a pointer to the last item in the AIL. If the AIL is empty, then
* return NULL.
*/
static xfs_log_item_t *
xfs_ail_max(
struct xfs_ail *ailp)
{
if (list_empty(&ailp->xa_ail))
return NULL;
return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}
/*
* Return a pointer to the item which follows the given item in the AIL. If
* the given item is the last item in the list, then return NULL.
*/
static xfs_log_item_t *
xfs_ail_next(
struct xfs_ail *ailp,
xfs_log_item_t *lip)
{
if (lip->li_ail.next == &ailp->xa_ail)
return NULL;
return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}
/*
* This is called by the log manager code to determine the LSN of the tail of
* the log. This is exactly the LSN of the first item in the AIL. If the AIL
* is empty, then this function returns 0.
*
 * We need the AIL lock in order to get a coherent read of the lsn of the first
* item in the AIL.
*/
xfs_lsn_t
xfs_ail_min_lsn(
struct xfs_ail *ailp)
{
xfs_lsn_t lsn = 0;
xfs_log_item_t *lip;
spin_lock(&ailp->xa_lock);
lip = xfs_ail_min(ailp);
if (lip)
lsn = lip->li_lsn;
spin_unlock(&ailp->xa_lock);
return lsn;
}
/*
* Return the maximum lsn held in the AIL, or zero if the AIL is empty.
*/
static xfs_lsn_t
xfs_ail_max_lsn(
struct xfs_ail *ailp)
{
xfs_lsn_t lsn = 0;
xfs_log_item_t *lip;
spin_lock(&ailp->xa_lock);
lip = xfs_ail_max(ailp);
if (lip)
lsn = lip->li_lsn;
spin_unlock(&ailp->xa_lock);
return lsn;
}
/*
* The cursor keeps track of where our current traversal is up to by tracking
* the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
* the traversal cursor needs to be linked to the struct xfs_ail so that
* deletion can search all the active cursors for invalidation.
*/
STATIC void
xfs_trans_ail_cursor_init(
struct xfs_ail *ailp,
struct xfs_ail_cursor *cur)
{
cur->item = NULL;
list_add_tail(&cur->list, &ailp->xa_cursors);
}
/*
* Get the next item in the traversal and advance the cursor. If the cursor
 * was invalidated (indicated by the low bit being set in the item pointer), restart the traversal.
*/
struct xfs_log_item *
xfs_trans_ail_cursor_next(
struct xfs_ail *ailp,
struct xfs_ail_cursor *cur)
{
struct xfs_log_item *lip = cur->item;
if ((__psint_t)lip & 1)
lip = xfs_ail_min(ailp);
if (lip)
cur->item = xfs_ail_next(ailp, lip);
return lip;
}
/*
* When the traversal is complete, we need to remove the cursor from the list
* of traversing cursors.
*/
void
xfs_trans_ail_cursor_done(
struct xfs_ail_cursor *cur)
{
cur->item = NULL;
list_del_init(&cur->list);
}
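/*
 * Illustration only (this mirrors how xfsaild_push() below drives the cursor
 * API; it adds no new functionality): a typical ascending traversal looks like
 *
 *	spin_lock(&ailp->xa_lock);
 *	lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *	while (lip != NULL) {
 *		... examine or push lip ...
 *		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *	}
 *	xfs_trans_ail_cursor_done(&cur);
 *	spin_unlock(&ailp->xa_lock);
 */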
/*
* Invalidate any cursor that is pointing to this item. This is called when an
* item is removed from the AIL. Any cursor pointing to this object is now
* invalid and the traversal needs to be terminated so it doesn't reference a
* freed object. We set the low bit of the cursor item pointer so we can
* distinguish between an invalidation and the end of the list when getting the
* next item from the cursor.
*/
STATIC void
xfs_trans_ail_cursor_clear(
struct xfs_ail *ailp,
struct xfs_log_item *lip)
{
struct xfs_ail_cursor *cur;
list_for_each_entry(cur, &ailp->xa_cursors, list) {
if (cur->item == lip)
cur->item = (struct xfs_log_item *)
((__psint_t)cur->item | 1);
}
}
/*
* Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
* ascending traversal. Pass a @lsn of zero to initialise the cursor to the
* first item in the AIL. Returns NULL if the list is empty.
*/
xfs_log_item_t *
xfs_trans_ail_cursor_first(
struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
xfs_lsn_t lsn)
{
xfs_log_item_t *lip;
xfs_trans_ail_cursor_init(ailp, cur);
if (lsn == 0) {
lip = xfs_ail_min(ailp);
goto out;
}
list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
goto out;
}
return NULL;
out:
if (lip)
cur->item = xfs_ail_next(ailp, lip);
return lip;
}
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
struct xfs_ail *ailp,
xfs_lsn_t lsn)
{
xfs_log_item_t *lip;
list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
return lip;
}
return NULL;
}
/*
* Find the last item in the AIL with the given @lsn by searching in descending
* LSN order and initialise the cursor to point to that item. If there is no
* item with the value of @lsn, then it sets the cursor to the last item with an
* LSN lower than @lsn. Returns NULL if the list is empty.
*/
struct xfs_log_item *
xfs_trans_ail_cursor_last(
struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
xfs_lsn_t lsn)
{
xfs_trans_ail_cursor_init(ailp, cur);
cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
return cur->item;
}
/*
* Splice the log item list into the AIL at the given LSN. We splice to the
* tail of the given LSN to maintain insert order for push traversals. The
* cursor is optional, allowing repeated updates to the same LSN to avoid
* repeated traversals. This should not be called with an empty list.
*/
static void
xfs_ail_splice(
struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
struct list_head *list,
xfs_lsn_t lsn)
{
struct xfs_log_item *lip;
ASSERT(!list_empty(list));
/*
* Use the cursor to determine the insertion point if one is
* provided. If not, or if the one we got is not valid,
* find the place in the AIL where the items belong.
*/
lip = cur ? cur->item : NULL;
if (!lip || (__psint_t) lip & 1)
lip = __xfs_trans_ail_cursor_last(ailp, lsn);
/*
* If a cursor is provided, we know we're processing the AIL
* in lsn order, and future items to be spliced in will
* follow the last one being inserted now. Update the
* cursor to point to that last item, now while we have a
* reliable pointer to it.
*/
if (cur)
cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);
/*
* Finally perform the splice. Unless the AIL was empty,
* lip points to the item in the AIL _after_ which the new
* items should go. If lip is null the AIL was empty, so
* the new items go at the head of the AIL.
*/
if (lip)
list_splice(list, &lip->li_ail);
else
list_splice(list, &ailp->xa_ail);
}
/*
* Delete the given item from the AIL. Return a pointer to the item.
*/
static void
xfs_ail_delete(
struct xfs_ail *ailp,
xfs_log_item_t *lip)
{
xfs_ail_check(ailp, lip);
list_del(&lip->li_ail);
xfs_trans_ail_cursor_clear(ailp, lip);
}
static long
xfsaild_push(
struct xfs_ail *ailp)
{
xfs_mount_t *mp = ailp->xa_mount;
struct xfs_ail_cursor cur;
xfs_log_item_t *lip;
xfs_lsn_t lsn;
xfs_lsn_t target;
long tout;
int stuck = 0;
int flushing = 0;
int count = 0;
/*
* If we encountered pinned items or did not finish writing out all
* buffers the last time we ran, force the log first and wait for it
* before pushing again.
*/
if (ailp->xa_log_flush && ailp->xa_last_pushed_lsn == 0 &&
(!list_empty_careful(&ailp->xa_buf_list) ||
xfs_ail_min_lsn(ailp))) {
ailp->xa_log_flush = 0;
XFS_STATS_INC(xs_push_ail_flush);
xfs_log_force(mp, XFS_LOG_SYNC);
}
spin_lock(&ailp->xa_lock);
/* barrier matches the xa_target update in xfs_ail_push() */
smp_rmb();
target = ailp->xa_target;
ailp->xa_target_prev = target;
lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
if (!lip) {
/*
* If the AIL is empty or our push has reached the end we are
* done now.
*/
xfs_trans_ail_cursor_done(&cur);
spin_unlock(&ailp->xa_lock);
goto out_done;
}
XFS_STATS_INC(xs_push_ail);
lsn = lip->li_lsn;
while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
int lock_result;
/*
* Note that iop_push may unlock and reacquire the AIL lock. We
* rely on the AIL cursor implementation to be able to deal with
* the dropped lock.
*/
lock_result = lip->li_ops->iop_push(lip, &ailp->xa_buf_list);
switch (lock_result) {
case XFS_ITEM_SUCCESS:
XFS_STATS_INC(xs_push_ail_success);
trace_xfs_ail_push(lip);
ailp->xa_last_pushed_lsn = lsn;
break;
case XFS_ITEM_FLUSHING:
/*
             * The item or its backing buffer is already being
* flushed. The typical reason for that is that an
* inode buffer is locked because we already pushed the
* updates to it as part of inode clustering.
*
             * We do not want to stop flushing just because lots
             * of items are already being flushed, but we need to
             * re-try the flushing relatively soon if most of the
             * AIL is being flushed.
*/
XFS_STATS_INC(xs_push_ail_flushing);
trace_xfs_ail_flushing(lip);
flushing++;
ailp->xa_last_pushed_lsn = lsn;
break;
case XFS_ITEM_PINNED:
XFS_STATS_INC(xs_push_ail_pinned);
trace_xfs_ail_pinned(lip);
stuck++;
ailp->xa_log_flush++;
break;
case XFS_ITEM_LOCKED:
XFS_STATS_INC(xs_push_ail_locked);
trace_xfs_ail_locked(lip);
stuck++;
break;
default:
ASSERT(0);
break;
}
count++;
/*
* Are there too many items we can't do anything with?
*
         * If we are skipping too many items because we can't flush
         * them or they are already being flushed, we back off and
         * give them time to complete whatever operation is being
* done. i.e. remove pressure from the AIL while we can't make
* progress so traversals don't slow down further inserts and
* removals to/from the AIL.
*
* The value of 100 is an arbitrary magic number based on
* observation.
*/
if (stuck > 100)
break;
lip = xfs_trans_ail_cursor_next(ailp, &cur);
if (lip == NULL)
break;
lsn = lip->li_lsn;
}
xfs_trans_ail_cursor_done(&cur);
spin_unlock(&ailp->xa_lock);
if (xfs_buf_delwri_submit_nowait(&ailp->xa_buf_list))
ailp->xa_log_flush++;
if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
/*
* We reached the target or the AIL is empty, so wait a bit
* longer for I/O to complete and remove pushed items from the
* AIL before we start the next scan from the start of the AIL.
*/
tout = 50;
ailp->xa_last_pushed_lsn = 0;
} else if (((stuck + flushing) * 100) / count > 90) {
/*
* Either there is a lot of contention on the AIL or we are
* stuck due to operations in progress. "Stuck" in this case
* is defined as >90% of the items we tried to push were stuck.
*
* Backoff a bit more to allow some I/O to complete before
* restarting from the start of the AIL. This prevents us from
         * spinning on the same items, and if they are pinned it allows
         * the restart to issue a log force to unpin the stuck items.
*/
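        /* e.g. 95 stuck/flushing items out of 100 pushed: (95 * 100) / 100 = 95 > 90 */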
tout = 20;
ailp->xa_last_pushed_lsn = 0;
} else {
/*
* Assume we have more work to do in a short while.
*/
tout = 10;
}
return tout;
}
static int
xfsaild(
void *data)
{
struct xfs_ail *ailp = data;
long tout = 0; /* milliseconds */
set_freezable();
current->flags |= PF_MEMALLOC;
while (!kthread_freezable_should_stop(NULL)) {
if (tout && tout <= 20)
__set_current_state(TASK_KILLABLE);
else
__set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&ailp->xa_lock);
/*
* Idle if the AIL is empty and we are not racing with a target
* update. We check the AIL after we set the task to a sleep
* state to guarantee that we either catch an xa_target update
* or that a wake_up resets the state to TASK_RUNNING.
* Otherwise, we run the risk of sleeping indefinitely.
*
* The barrier matches the xa_target update in xfs_ail_push().
*/
smp_rmb();
if (!xfs_ail_min(ailp) &&
ailp->xa_target == ailp->xa_target_prev) {
spin_unlock(&ailp->xa_lock);
schedule();
try_to_freeze();
tout = 0;
continue;
}
spin_unlock(&ailp->xa_lock);
if (tout)
schedule_timeout(msecs_to_jiffies(tout));
__set_current_state(TASK_RUNNING);
try_to_freeze();
tout = xfsaild_push(ailp);
}
return 0;
}
/*
* This routine is called to move the tail of the AIL forward. It does this by
* trying to flush items in the AIL whose lsns are below the given
* threshold_lsn.
*
* The push is run asynchronously in a workqueue, which means the caller needs
* to handle waiting on the async flush for space to become available.
* We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
*
* We do this unlocked - we only need to know whether there is anything in the
* AIL at the time we are called. We don't need to access the contents of
* any of the objects, so the lock is not needed.
*/
void
xfs_ail_push(
struct xfs_ail *ailp,
xfs_lsn_t threshold_lsn)
{
xfs_log_item_t *lip;
lip = xfs_ail_min(ailp);
if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
return;
/*
* Ensure that the new target is noticed in push code before it clears
* the XFS_AIL_PUSHING_BIT.
*/
smp_wmb();
xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
smp_wmb();
wake_up_process(ailp->xa_task);
}
/*
* Push out all items in the AIL immediately
*/
void
xfs_ail_push_all(
struct xfs_ail *ailp)
{
xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp);
if (threshold_lsn)
xfs_ail_push(ailp, threshold_lsn);
}
/*
* Push out all items in the AIL immediately and wait until the AIL is empty.
*/
void
xfs_ail_push_all_sync(
struct xfs_ail *ailp)
{
struct xfs_log_item *lip;
DEFINE_WAIT(wait);
spin_lock(&ailp->xa_lock);
while ((lip = xfs_ail_max(ailp)) != NULL) {
prepare_to_wait(&ailp->xa_empty, &wait, TASK_UNINTERRUPTIBLE);
ailp->xa_target = lip->li_lsn;
wake_up_process(ailp->xa_task);
spin_unlock(&ailp->xa_lock);
schedule();
spin_lock(&ailp->xa_lock);
}
spin_unlock(&ailp->xa_lock);
finish_wait(&ailp->xa_empty, &wait);
}
/*
* xfs_trans_ail_update - bulk AIL insertion operation.
*
* @xfs_trans_ail_update takes an array of log items that all need to be
* positioned at the same LSN in the AIL. If an item is not in the AIL, it will
* be added. Otherwise, it will be repositioned by removing it and re-adding
* it to the AIL. If we move the first item in the AIL, update the log tail to
* match the new minimum LSN in the AIL.
*
* This function takes the AIL lock once to execute the update operations on
* all the items in the array, and as such should not be called with the AIL
* lock held. As a result, once we have the AIL lock, we need to check each log
* item LSN to confirm it needs to be moved forward in the AIL.
*
* To optimise the insert operation, we delete all the items from the AIL in
* the first pass, moving them into a temporary list, then splice the temporary
* list into the correct position in the AIL. This avoids needing to do an
* insert operation on every item.
*
* This function must be called with the AIL lock held. The lock is dropped
* before returning.
*/
void
xfs_trans_ail_update_bulk(
struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
struct xfs_log_item **log_items,
int nr_items,
xfs_lsn_t lsn) __releases(ailp->xa_lock)
{
xfs_log_item_t *mlip;
int mlip_changed = 0;
int i;
LIST_HEAD(tmp);
ASSERT(nr_items > 0); /* Not required, but true. */
mlip = xfs_ail_min(ailp);
for (i = 0; i < nr_items; i++) {
struct xfs_log_item *lip = log_items[i];
if (lip->li_flags & XFS_LI_IN_AIL) {
/* check if we really need to move the item */
if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
continue;
trace_xfs_ail_move(lip, lip->li_lsn, lsn);
xfs_ail_delete(ailp, lip);
if (mlip == lip)
mlip_changed = 1;
} else {
lip->li_flags |= XFS_LI_IN_AIL;
trace_xfs_ail_insert(lip, 0, lsn);
}
lip->li_lsn = lsn;
list_add(&lip->li_ail, &tmp);
}
if (!list_empty(&tmp))
xfs_ail_splice(ailp, cur, &tmp, lsn);
if (mlip_changed) {
if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
xlog_assign_tail_lsn_locked(ailp->xa_mount);
spin_unlock(&ailp->xa_lock);
xfs_log_space_wake(ailp->xa_mount);
} else {
spin_unlock(&ailp->xa_lock);
}
}
/*
* xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
*
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has done
* all the checks necessary to ensure the items passed in via @log_items are
* ready for deletion. This includes checking that the items are in the AIL.
*
* For each log item to be removed, unlink it from the AIL, clear the IN_AIL
* flag from the item and reset the item's lsn to 0. If we remove the first
* item in the AIL, update the log tail to match the new minimum LSN in the
* AIL.
*
* This function will not drop the AIL lock until all items are removed from
* the AIL to minimise the amount of lock traffic on the AIL. This does not
* greatly increase the AIL hold time, but does significantly reduce the amount
* of traffic on the lock, especially during IO completion.
*
* This function must be called with the AIL lock held. The lock is dropped
* before returning.
*/
void
xfs_trans_ail_delete_bulk(
struct xfs_ail *ailp,
struct xfs_log_item **log_items,
int nr_items,
int shutdown_type) __releases(ailp->xa_lock)
{
xfs_log_item_t *mlip;
int mlip_changed = 0;
int i;
mlip = xfs_ail_min(ailp);
for (i = 0; i < nr_items; i++) {
struct xfs_log_item *lip = log_items[i];
if (!(lip->li_flags & XFS_LI_IN_AIL)) {
struct xfs_mount *mp = ailp->xa_mount;
spin_unlock(&ailp->xa_lock);
if (!XFS_FORCED_SHUTDOWN(mp)) {
xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
"%s: attempting to delete a log item that is not in the AIL",
__func__);
xfs_force_shutdown(mp, shutdown_type);
}
return;
}
trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
xfs_ail_delete(ailp, lip);
lip->li_flags &= ~XFS_LI_IN_AIL;
lip->li_lsn = 0;
if (mlip == lip)
mlip_changed = 1;
}
if (mlip_changed) {
if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
xlog_assign_tail_lsn_locked(ailp->xa_mount);
if (list_empty(&ailp->xa_ail))
wake_up_all(&ailp->xa_empty);
spin_unlock(&ailp->xa_lock);
xfs_log_space_wake(ailp->xa_mount);
} else {
spin_unlock(&ailp->xa_lock);
}
}
int
xfs_trans_ail_init(
xfs_mount_t *mp)
{
struct xfs_ail *ailp;
ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
if (!ailp)
return ENOMEM;
ailp->xa_mount = mp;
INIT_LIST_HEAD(&ailp->xa_ail);
INIT_LIST_HEAD(&ailp->xa_cursors);
spin_lock_init(&ailp->xa_lock);
INIT_LIST_HEAD(&ailp->xa_buf_list);
init_waitqueue_head(&ailp->xa_empty);
ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
ailp->xa_mount->m_fsname);
if (IS_ERR(ailp->xa_task))
goto out_free_ailp;
mp->m_ail = ailp;
return 0;
out_free_ailp:
kmem_free(ailp);
return ENOMEM;
}
void
xfs_trans_ail_destroy(
xfs_mount_t *mp)
{
struct xfs_ail *ailp = mp->m_ail;
kthread_stop(ailp->xa_task);
kmem_free(ailp);
}
| Scorpio92/linux_kernel_3.16.1 | fs/xfs/xfs_trans_ail.c | C | gpl-2.0 | 20,742 |
/*
* ecgen, tool for generating Elliptic curve domain parameters
* Copyright (C) 2017-2018 J08nY
*/
#include "hex.h"
#include "exhaustive/arg.h"
#include "field.h"
#include "util/bits.h"
#include "util/memory.h"
#include "util/str.h"
static char *hex_point(point_t *point) {
GEN fx = field_elementi(gel(point->point, 1));
GEN fy = field_elementi(gel(point->point, 2));
char *fxs = pari_sprintf("%P0#*x", cfg->hex_digits, fx);
	char *fys = pari_sprintf("%P0#*x", cfg->hex_digits, fy);
	char *result = str_joinv(",", fxs, fys, NULL);
	pari_free(fxs);
	pari_free(fys);
return result;
}
static char *hex_points(point_t *points[], size_t len) {
char *p[len];
for (size_t i = 0; i < len; ++i) {
point_t *pt = points[i];
p[i] = hex_point(pt);
}
size_t total = 1;
for (size_t i = 0; i < len; ++i) {
total += strlen(p[i]);
}
char *result = try_calloc(total);
for (size_t i = 0; i < len; ++i) {
strcat(result, p[i]);
try_free(p[i]);
}
return result;
}
CHECK(hex_check_param) {
HAS_ARG(args);
char *search_hex = try_strdup(args->args);
char *p = search_hex;
for (; *p; ++p) *p = (char)tolower(*p);
char *params[OFFSET_END] = {NULL};
bool pari[OFFSET_END] = {false};
if (state >= OFFSET_SEED) {
if (curve->seed && curve->seed->seed) {
params[OFFSET_SEED] = bits_to_hex(curve->seed->seed);
}
}
if (state >= OFFSET_FIELD) {
if (cfg->field == FIELD_PRIME) {
params[OFFSET_FIELD] =
pari_sprintf("%P0#*x", cfg->hex_digits, curve->field);
pari[OFFSET_FIELD] = true;
} else if (cfg->field == FIELD_BINARY) {
}
}
if (state >= OFFSET_A) {
params[OFFSET_A] =
pari_sprintf("%P0#*x", cfg->hex_digits, field_elementi(curve->a));
pari[OFFSET_A] = true;
}
if (state >= OFFSET_B) {
params[OFFSET_B] =
pari_sprintf("%P0#*x", cfg->hex_digits, field_elementi(curve->b));
pari[OFFSET_B] = true;
}
if (state >= OFFSET_ORDER) {
params[OFFSET_ORDER] =
pari_sprintf("%P0#*x", cfg->hex_digits, curve->order);
pari[OFFSET_ORDER] = true;
}
if (state >= OFFSET_GENERATORS) {
char *subgroups[curve->ngens];
for (size_t i = 0; i < curve->ngens; ++i) {
subgroups[i] = hex_point(curve->generators[i]->generator);
}
params[OFFSET_GENERATORS] = str_join(",", subgroups, curve->ngens);
for (size_t i = 0; i < curve->ngens; ++i) {
try_free(subgroups[i]);
}
}
if (state >= OFFSET_POINTS) {
char *subgroups[curve->ngens];
for (size_t i = 0; i < curve->ngens; ++i) {
subgroups[i] = hex_points(curve->generators[i]->points,
curve->generators[i]->npoints);
}
params[OFFSET_POINTS] = str_join(",", subgroups, curve->ngens);
for (size_t i = 0; i < curve->ngens; ++i) {
try_free(subgroups[i]);
}
}
int result = OFFSET_FIELD - state;
for (offset_e i = OFFSET_SEED; i < OFFSET_END; ++i) {
if (params[i]) {
if (result != 1 && strstr(params[i], search_hex)) {
result = 1;
}
if (pari[i]) {
pari_free(params[i]);
} else {
try_free(params[i]);
}
}
}
try_free(search_hex);
return result;
} | J08nY/ecgen | src/gen/hex.c | C | gpl-2.0 | 3,044 |
/* linux/drivers/mmc/host/sdhci-s3c.c
*
* Copyright 2008 Openmoko Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* SDHCI (HSMMC) support for Samsung SoC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <mach/regs-clock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <plat/sdhci.h>
#include <plat/regs-sdhci.h>
#include <plat/clock.h>
#include <plat/clock-clksrc.h>
#include <linux/kernel.h>
#include "sdhci.h"
#define MAX_BUS_CLK (1)
void mmc_valid(u16 valid, struct mmc_host *host);
/* add by cym 20130328 */
#if CONFIG_MTK_COMBO_MT66XX
/* skip do suspend for mmc2 host. But it would fail because clock is stopped
* but NOT restored automatically after resume.
*/
#define MMC2_SKIP_SUSPEND (0)
/* Enable the following pm capabilities for mmc2 host for wlan suspend/resume:
* MMC_PM_KEEP_POWER
* MMC_PM_WAKE_SDIO_IRQ
* MMC_PM_IGNORE_PM_NOTIFY
* It works on mldk4x12.
*/
#define MMC2_DO_SUSPEND_KEEP_PWR (1)
#endif
/* end add */
/**
* struct sdhci_s3c - S3C SDHCI instance
* @host: The SDHCI host created
* @pdev: The platform device we where created from.
* @ioarea: The resource created when we claimed the IO area.
* @pdata: The platform data for this controller.
* @cur_clk: The index of the current bus clock.
* @ext_cd_irq: External card detect interrupt number (0 if unused).
* @ext_cd_gpio: External card detect GPIO (-1 if not in use).
* @clk_io: The clock for the internal bus interface.
* @clk_bus: The clocks that are available for the SD/MMC bus clock.
*/
struct sdhci_s3c {
struct sdhci_host *host;
struct platform_device *pdev;
struct resource *ioarea;
struct s3c_sdhci_platdata *pdata;
unsigned int cur_clk;
int ext_cd_irq;
int ext_cd_gpio;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
};
static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
{
return sdhci_priv(host);
}
/**
* get_curclk - convert ctrl2 register to clock source number
* @ctrl2: Control2 register value.
*/
static u32 get_curclk(u32 ctrl2)
{
ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
return ctrl2;
}
static void sdhci_s3c_check_sclk(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
if (get_curclk(tmp) != ourhost->cur_clk) {
dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n");
tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
writel(tmp, host->ioaddr + 0x80);
}
}
/**
* sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
* @host: The SDHCI host instance.
*
* Callback to return the maximum clock rate achievable by the controller.
*/
static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
struct clk *busclk;
unsigned int rate, max;
int clk;
/* note, a reset will reset the clock source */
sdhci_s3c_check_sclk(host);
for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
busclk = ourhost->clk_bus[clk];
if (!busclk)
continue;
rate = clk_get_rate(busclk);
if (rate > max)
max = rate;
}
return max;
}
static inline struct clksrc_clk *to_clksrc(struct clk *clk)//lisw sd
{
return container_of(clk, struct clksrc_clk, clk);
}
/**
* sdhci_s3c_consider_clock - consider one of the bus clocks for the current setting
* @ourhost: Our SDHCI instance.
* @src: The source clock index.
* @wanted: The clock frequency wanted.
*/
static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
unsigned int src,
unsigned int wanted)
{
unsigned long rate;
struct clk *clk_sclk_mmc = ourhost->clk_bus[0];//lisw sd : for different clk source structure
struct clksrc_clk *clksrc_parent = to_clksrc(clk_sclk_mmc->parent);
struct clk *clksrc = clksrc_parent->sources->sources[src];
int div;
if (!clksrc)
return UINT_MAX;
/*
* When 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL, the external clock
* divider steps in increments of 1, which differs from the host
* controller's internal divider.
*/
// if (ourhost->pdata->clk_type) {
// rate = clk_round_rate(clksrc, wanted);
// return wanted - rate;
// }
rate = clk_get_rate(clksrc);
for (div = 1; div < 256; div++) {
if ((rate / div) <= wanted)
break;
}
dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
src, rate, wanted, rate / div);
return (wanted - (rate / div));
}
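/*
 * Worked example (illustrative numbers only): with a candidate source clock
 * of rate = 100 MHz and wanted = 25 MHz, the loop stops at div = 4
 * (100 MHz / 4 = 25 MHz <= wanted) and the delta returned is 0. For
 * wanted = 48 MHz it stops at div = 3 (about 33.33 MHz) and returns roughly
 * 14.67 MHz; the caller picks the source with the smallest delta.
 */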
/**
* sdhci_s3c_set_clock_src - callback on clock change
* @host: The SDHCI host being changed
* @clock: The clock rate being requested.
*
* When the card's clock is going to be changed, look at the new frequency
* and find the best clock source to go with it.
*/
int s3c_setrate_clksrc_two_div(struct clk *clk, unsigned long rate);//lisw sd
int clk_set_parent(struct clk *clk, struct clk *parent);//lisw sd
static void sdhci_s3c_set_clock_src(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_s3c *ourhost = to_s3c(host);
struct clk *clk_sclk_mmc = ourhost->clk_bus[0];//lisw sd : for different clk source structure
struct clksrc_clk *clksrc_parent = to_clksrc(clk_sclk_mmc->parent);
unsigned int best = UINT_MAX;
unsigned int delta;
int best_src = 0;
int src;
u32 ctrl;
/* don't bother if the clock is going off. */
if (clock == 0)
return;
if(MAX_BUS_CLK==1){
for (src = 6; src < clksrc_parent->sources->nr_sources; src++) {//lisw ms : set 6 as firsrt selection because XXTI 24Mhz is not stable
delta = sdhci_s3c_consider_clock(ourhost, src, clock);
if (delta < best) {
best = delta;
best_src = src;
}
}
}
else
return;
//printk("selected source %d, clock %d, delta %d\n",
// best_src, clock, best);
/* select the new clock source */
if (ourhost->cur_clk != best_src) {
struct clk *clk = clksrc_parent->sources->sources[best_src];
/* turn clock off to card before changing clock source */
writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
ourhost->cur_clk = best_src;
host->max_clk = clk_get_rate(clk);
// ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
// ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
// ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
// writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
//***use the base clock select function in the CMU instead of in the SD host controller***//
if (clk_set_parent(clk_sclk_mmc->parent, clk))
printk("Unable to set parent %s of clock %s.\n",
clk->name, clksrc_parent->clk.name);
clk_sclk_mmc->parent->parent = clk;
}
// s3c_setrate_clksrc_two_div(clk_sclk_mmc,clock);
/* reconfigure the hardware for new clock rate */
{
struct mmc_ios ios;
ios.clock = clock;
if (ourhost->pdata->cfg_card)
(ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
&ios, NULL);
}
}
/**
* sdhci_s3c_set_clock - callback on clock change
* @host: The SDHCI host being changed
* @clock: The clock rate being requested.
*
* When the card's clock is going to be changed, look at the new frequency
* and find the best clock source to go with it.
*/
static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_s3c *ourhost = to_s3c(host);
unsigned int best = UINT_MAX;
unsigned int delta;
int best_src = 0;
int src;
u32 ctrl;
/* don't bother if the clock is going off. */
if (clock == 0)
return;
for (src = 0; src < MAX_BUS_CLK; src++) {
delta = sdhci_s3c_consider_clock(ourhost, src, clock);
if (delta < best) {
best = delta;
best_src = src;
}
}
dev_dbg(&ourhost->pdev->dev,
"selected source %d, clock %d, delta %d\n",
best_src, clock, best);
/* select the new clock source */
if (ourhost->cur_clk != best_src) {
struct clk *clk = ourhost->clk_bus[best_src];
/* turn clock off to card before changing clock source */
writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
ourhost->cur_clk = best_src;
host->max_clk = clk_get_rate(clk);
ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
}
/* reconfigure the hardware for new clock rate */
{
struct mmc_ios ios;
ios.clock = clock;
if (ourhost->pdata->cfg_card)
(ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
&ios, NULL);
}
}
/**
* sdhci_s3c_get_min_clock - callback to get minimal supported clock value
* @host: The SDHCI host being queried
*
* To init the mmc host properly a minimal clock value is needed. For high
* system bus clock values the standard formula gives values outside the
* allowed range. The clock can still be set to lower values if a clock
* source other than the system bus is selected.
*/
static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
unsigned int delta, min = UINT_MAX;
int src;
for (src = 0; src < MAX_BUS_CLK; src++) {
delta = sdhci_s3c_consider_clock(ourhost, src, 0);
if (delta == UINT_MAX)
continue;
/* delta is a negative value in this case */
if (-delta < min)
min = -delta;
}
return min;
}
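/*
 * With wanted = 0 the divider loop in sdhci_s3c_consider_clock never breaks
 * early, so each source contributes -(rate / 256); the minimum clock is
 * therefore rate / 256 of the slowest usable source (e.g. a hypothetical
 * 100 MHz source gives about 390 kHz).
 */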
/* sdhci_cmu_get_max_clk - callback to get maximum clock frequency.*/
static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
host->max_clk = clk_get_rate(to_clksrc(ourhost->clk_bus[0]->parent)->sources->sources[ourhost->cur_clk]);
return host->max_clk;//clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
}
/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
/*
* initial clock can be in the frequency range of
* 100KHz-400KHz, so we set it as max value.
*/
return sdhci_cmu_get_max_clock(host)/((1 << to_clksrc(ourhost->clk_bus[0]->parent)->reg_div.size)*(1 << to_clksrc(ourhost->clk_bus[0])->reg_div.size));
}
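/*
 * Sketch of the arithmetic above, with assumed register widths: if the
 * maximum clock is 96 MHz and both the parent and child divider fields are
 * 4 bits wide (divide-by-16 each), the minimum clock becomes
 * 96 MHz / (16 * 16) = 375 kHz, which falls inside the 100 KHz - 400 KHz
 * range required for card initialization.
 */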
/* sdhci_cmu_set_clock - callback on clock change.*/
static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_s3c *ourhost = to_s3c(host);
/* don't bother if the clock is going off */
if (clock == 0)
return;
// sdhci_s3c_set_clock(host, clock);
sdhci_s3c_set_clock_src(host, clock);
// clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
s3c_setrate_clksrc_two_div(ourhost->clk_bus[0],clock);
host->clock = clock;
}
/**
* sdhci_s3c_platform_8bit_width - support 8bit buswidth
* @host: The SDHCI host being queried
* @width: MMC_BUS_WIDTH_ macro for the bus width being requested
*
* The controller supports an 8-bit bus width but is not a v3 controller,
* so we add platform_8bit_width() to provide 8-bit support.
*/
static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
{
u8 ctrl;
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
switch (width) {
case MMC_BUS_WIDTH_8:
ctrl |= SDHCI_CTRL_8BITBUS;
ctrl &= ~SDHCI_CTRL_4BITBUS;
break;
case MMC_BUS_WIDTH_4:
ctrl |= SDHCI_CTRL_4BITBUS;
ctrl &= ~SDHCI_CTRL_8BITBUS;
break;
default:
break;
}
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
return 0;
}
//ly 20111123
int sdhci_s3c_get_cd(struct sdhci_host *mmc)//lisw sd hotplug
{
int detect;
struct sdhci_s3c* sc = sdhci_priv(mmc);
if(mmc->mmc->index== 1){ //ch0+1 is emmc ch2 is TF
int status = gpio_get_value(sc->pdata->ext_cd_gpio);
if (sc->pdata->ext_cd_gpio_invert)
status = !status;
if (status){
detect = true;
}else{
detect = false;
}
}else{
detect = true;
}
return detect;
}
static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
.platform_8bit_width = sdhci_s3c_platform_8bit_width,
.get_cd = sdhci_s3c_get_cd,//ly
};
void sdhci_init(struct sdhci_host *host, int soft);
static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
{
struct sdhci_host *host = platform_get_drvdata(dev);
unsigned long flags;
if (host) {
spin_lock_irqsave(&host->lock, flags);
if (state) {
dev_dbg(&dev->dev, "card inserted.\n");
host->flags &= ~SDHCI_DEVICE_DEAD;
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
sdhci_init(host,0);//lisw sd hotplug : for reinitialize host controller each time plugin
} else {
dev_dbg(&dev->dev, "card removed.\n");
host->flags |= SDHCI_DEVICE_DEAD;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
tasklet_schedule(&host->card_tasklet);
spin_unlock_irqrestore(&host->lock, flags);
}
}
static irqreturn_t sdhci_s3c_gpio_card_detect_thread(int irq, void *dev_id)
{
struct sdhci_s3c *sc = dev_id;
int status = gpio_get_value(sc->pdata->ext_cd_gpio);
if (sc->pdata->ext_cd_gpio_invert)
status = !status;
if(status){//card present
mmc_valid(1,sc->host->mmc);//lisw sd hotplug
}
else{//card absent
mmc_valid(0,sc->host->mmc);
}
sdhci_s3c_notify_change(sc->pdev, status);
return IRQ_HANDLED;
}
void sdhci_s3c_sdio_card_detect(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
//printk(KERN_DEBUG "+%s", __FUNCTION__);
printk("+%s\n", __FUNCTION__);
// mmc_detect_change(host->mmc, msecs_to_jiffies(60));
mmc_detect_change(host->mmc, msecs_to_jiffies(500));
//printk(KERN_DEBUG "-%s", __FUNCTION__);
printk("-%s\n", __FUNCTION__);
}
EXPORT_SYMBOL(sdhci_s3c_sdio_card_detect);
static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
{
struct s3c_sdhci_platdata *pdata = sc->pdata;
struct device *dev = &sc->pdev->dev;
if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) {
sc->ext_cd_gpio = pdata->ext_cd_gpio;
sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio);
enable_irq_wake(sc->ext_cd_irq);//lisw hotplug during suspend
if (sc->ext_cd_irq &&
request_threaded_irq(sc->ext_cd_irq, NULL,
sdhci_s3c_gpio_card_detect_thread,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
dev_name(dev), sc) == 0) {
int status = gpio_get_value(sc->ext_cd_gpio);
if (pdata->ext_cd_gpio_invert)
status = !status;
sdhci_s3c_notify_change(sc->pdev, status);
} else {
dev_warn(dev, "cannot request irq for card detect\n");
sc->ext_cd_irq = 0;
}
} else {
dev_err(dev, "cannot request gpio for card detect\n");
}
}
static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
{
struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct sdhci_host *host;
struct sdhci_s3c *sc;
struct resource *res;
int ret, irq, ptr, clks;
if (!pdata) {
dev_err(dev, "no device data specified\n");
return -ENOENT;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "no irq specified\n");
return irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "no memory specified\n");
return -ENOENT;
}
host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
if (IS_ERR(host)) {
dev_err(dev, "sdhci_alloc_host() failed\n");
return PTR_ERR(host);
}
sc = sdhci_priv(host);
sc->host = host;
sc->pdev = pdev;
sc->pdata = pdata;
sc->ext_cd_gpio = -1; /* invalid gpio number */
platform_set_drvdata(pdev, host);
sc->clk_io = clk_get(dev, "hsmmc");
if (IS_ERR(sc->clk_io)) {
dev_err(dev, "failed to get io clock\n");
ret = PTR_ERR(sc->clk_io);
goto err_io_clk;
}
/* enable the local io clock and keep it running for the moment. */
clk_enable(sc->clk_io);
for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
struct clk *clk;
char *name = pdata->clocks[ptr];
if (name == NULL)
continue;
clk = clk_get(dev, name);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get clock %s\n", name);
continue;
}
clks++;
sc->clk_bus[ptr] = clk;
/*
* save current clock index to know which clock bus
* is used later in overriding functions.
*/
sc->cur_clk = 7;// clock sources select number
clk_set_parent(clk->parent,to_clksrc(clk->parent)->sources->sources[7]);
clk_enable(clk);
dev_info(dev, "clock source %d: %s (%ld Hz)\n",
ptr, name, clk_get_rate(clk));
}
if (clks == 0) {
dev_err(dev, "failed to find any bus clocks\n");
ret = -ENOENT;
goto err_no_busclks;
}
sc->ioarea = request_mem_region(res->start, resource_size(res),
mmc_hostname(host->mmc));
if (!sc->ioarea) {
dev_err(dev, "failed to reserve register area\n");
ret = -ENXIO;
goto err_req_regs;
}
host->ioaddr = ioremap_nocache(res->start, resource_size(res));
if (!host->ioaddr) {
dev_err(dev, "failed to map registers\n");
ret = -ENXIO;
goto err_req_regs;
}
/* Ensure we have minimal gpio selected CMD/CLK/Detect */
if (pdata->cfg_gpio)
pdata->cfg_gpio(pdev, pdata->max_width);
host->hw_name = "samsung-hsmmc";
host->ops = &sdhci_s3c_ops;
host->quirks = 0;
host->irq = irq;
/* Setup quirks for the controller */
host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
#ifndef CONFIG_MMC_SDHCI_S3C_DMA
/* we currently see overruns on errors, so disable the SDMA
* support as well. */
host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
/* It seems we do not get a DATA transfer complete on non-busy
* transfers, not sure if this is a problem with this specific
* SDHCI block, or a missing configuration that needs to be set. */
host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
/* This host supports the Auto CMD12 */
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->mmc->caps = MMC_CAP_NONREMOVABLE;
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE);
/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
/*
* If controller does not have internal clock divider,
* we can use overriding functions instead of default.
*/
if (pdata->clk_type) {
sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
}
/* It supports additional host capabilities if needed */
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
/* add by cym 20130328 */
#if MMC2_SKIP_SUSPEND
if (2 == host->mmc->index) {
/* to avoid redundant mmc_detect_change() called by mmc_pm_notify() */
printk(KERN_INFO "%s: set MMC_PM_IGNORE_PM_NOTIFY for %s pm_flags\n",
__func__, mmc_hostname(host->mmc));
host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
}
#elif MMC2_DO_SUSPEND_KEEP_PWR
if (2 == host->mmc->index) {
/* to avoid redundant mmc_detect_change() called by mmc_pm_notify() */
printk(KERN_INFO "%s: set MMC_PM_IGNORE_PM_NOTIFY for %s pm_flags\n",
__func__, mmc_hostname(host->mmc));
host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
printk(KERN_INFO "%s: set MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ for %s pm_caps\n",
__func__, mmc_hostname(host->mmc));
host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
}
#endif
/* end add */
ret = sdhci_add_host(host);
if (ret) {
dev_err(dev, "sdhci_add_host() failed\n");
goto err_add_host;
}
/* The following two methods of card detection might call
sdhci_s3c_notify_change() immediately, so they can be called
only after sdhci_add_host(). Setup errors are ignored. */
if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init)
pdata->ext_cd_init(&sdhci_s3c_notify_change);
if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
gpio_is_valid(pdata->ext_cd_gpio))
sdhci_s3c_setup_card_detect_gpio(sc);
return 0;
err_add_host:
release_resource(sc->ioarea);
kfree(sc->ioarea);
err_req_regs:
for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
clk_disable(sc->clk_bus[ptr]);
clk_put(sc->clk_bus[ptr]);
}
err_no_busclks:
clk_disable(sc->clk_io);
clk_put(sc->clk_io);
err_io_clk:
sdhci_free_host(host);
return ret;
}
static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
{
struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
int ptr;
if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup)
pdata->ext_cd_cleanup(&sdhci_s3c_notify_change);
if (sc->ext_cd_irq)
free_irq(sc->ext_cd_irq, sc);
if (gpio_is_valid(sc->ext_cd_gpio))
gpio_free(sc->ext_cd_gpio);
sdhci_remove_host(host, 1);
for (ptr = 0; ptr < 3; ptr++) {
if (sc->clk_bus[ptr]) {
clk_disable(sc->clk_bus[ptr]);
clk_put(sc->clk_bus[ptr]);
}
}
clk_disable(sc->clk_io);
clk_put(sc->clk_io);
iounmap(host->ioaddr);
release_resource(sc->ioarea);
kfree(sc->ioarea);
sdhci_free_host(host);
platform_set_drvdata(pdev, NULL);
return 0;
}
#ifdef CONFIG_PM
static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
{
struct sdhci_host *host = platform_get_drvdata(dev);
/* add by cym 20130328 */
#if MMC2_SKIP_SUSPEND
/* mmc2 is s3c_device_hsmmc3 */
if (2 == host->mmc->index) {
printk(KERN_INFO "skip %s for %s dev->id(%d)\n", __func__, mmc_hostname(host->mmc), dev->id);
return 0;
}
else {
printk(KERN_INFO "%s for %s dev->id(%d)\n", __func__, mmc_hostname(host->mmc), dev->id);
}
#endif
/* end add */
sdhci_suspend_host(host, pm);
return 0;
}
static int sdhci_s3c_resume(struct platform_device *dev)
{
struct sdhci_host *host = platform_get_drvdata(dev);
struct sdhci_s3c *sc = sdhci_priv(host);
/* add by cym 20130328 */
#if MMC2_SKIP_SUSPEND
/* mmc2 is s3c_device_hsmmc3 */
if (2 == host->mmc->index) {
printk(KERN_INFO "skip %s for %s dev->id(%d)\n", __func__, mmc_hostname(host->mmc), dev->id);
return 0;
}
else {
printk(KERN_INFO "%s for %s dev->id(%d)\n", __func__, mmc_hostname(host->mmc), dev->id);
}
#endif
/* end add */
sdhci_resume_host(host);
/* add by cym 20130328 */
#ifndef MMC2_SKIP_SUSPEND
/* end add */
if(!(host->mmc->caps & MMC_CAP_NONREMOVABLE)){//lisw hotplug during suspend
int status = gpio_get_value(sc->ext_cd_gpio);
if (sc->pdata->ext_cd_gpio_invert)
status = !status;
sdhci_s3c_notify_change(sc->pdev, status);
}
/* add by cym 20130328 */
#endif
/* end add */
return 0;
}
#else
#define sdhci_s3c_suspend NULL
#define sdhci_s3c_resume NULL
#endif
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove = __devexit_p(sdhci_s3c_remove),
.suspend = sdhci_s3c_suspend,
.resume = sdhci_s3c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
},
};
static int __init sdhci_s3c_init(void)
{
return platform_driver_register(&sdhci_s3c_driver);
}
static void __exit sdhci_s3c_exit(void)
{
platform_driver_unregister(&sdhci_s3c_driver);
}
module_init(sdhci_s3c_init);
module_exit(sdhci_s3c_exit);
MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:s3c-sdhci");
| Android-Dongyf/itop-kernel | drivers/mmc/host/sdhci-s3c-ori.c | C | gpl-2.0 | 23,555 |
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/*----------------------------------------------------------------------------*/
// COPYRIGHT(C) FUJITSU LIMITED 2011-2012
/*----------------------------------------------------------------------------*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/fb.h>
#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"
/* FUJITSU:2012-05-29 DISP add prevent set_lut interruption start */
extern struct mutex msm_fb_ioctl_lut_sem;
/* FUJITSU:2012-05-29 DISP add prevent set_lut interruption end */
static struct mdp4_overlay_pipe *mddi_pipe;
static struct msm_fb_data_type *mddi_mfd;
static int busy_wait_cnt;
static int vsync_start_y_adjust = 4;
static int dmap_vsync_enable;
/* FUJITSU:2011-12-22 add sandstorm blocker --> */
#if defined(CONFIG_FB_MSM_MDDI)
extern void mddi_panel_fullscrn_update_notify(void);
#endif
/* FUJITSU:2011-12-22 add sandstorm blocker <-- */
void mdp_dmap_vsync_set(int enable)
{
dmap_vsync_enable = enable;
}
int mdp_dmap_vsync_get(void)
{
return dmap_vsync_enable;
}
void mdp4_mddi_vsync_enable(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe, int which)
{
uint32 start_y, data, tear_en;
tear_en = (1 << which);
if ((mfd->use_mdp_vsync) && (mfd->ibuf.vsync_enable) &&
(mfd->panel_info.lcd.vsync_enable)) {
if (mdp_hw_revision < MDP4_REVISION_V2_1) {
/* need dmas dmap switch */
if (which == 0 && dmap_vsync_enable == 0 &&
mfd->panel_info.lcd.rev < 2) /* dma_p */
return;
}
if (vsync_start_y_adjust <= pipe->dst_y)
start_y = pipe->dst_y - vsync_start_y_adjust;
else
start_y = (mfd->total_lcd_lines - 1) -
(vsync_start_y_adjust - pipe->dst_y);
if (which == 0)
MDP_OUTP(MDP_BASE + 0x210, start_y); /* primary */
else
MDP_OUTP(MDP_BASE + 0x214, start_y); /* secondary */
data = inpdw(MDP_BASE + 0x20c);
data |= tear_en;
MDP_OUTP(MDP_BASE + 0x20c, data);
} else {
data = inpdw(MDP_BASE + 0x20c);
data &= ~tear_en;
MDP_OUTP(MDP_BASE + 0x20c, data);
}
}
#define WHOLESCREEN
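/*
 * With WHOLESCREEN defined (as above) the overlay pipe is always programmed
 * with the full framebuffer geometry from fbi->var; the disabled #else
 * branch below would instead restrict the update to the dirty rectangle
 * described by MDPIBUF whenever the overlay mixer is not active.
 */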
void mdp4_overlay_update_lcd(struct msm_fb_data_type *mfd)
{
MDPIBUF *iBuf = &mfd->ibuf;
uint8 *src;
int ptype;
uint32 mddi_ld_param;
uint16 mddi_vdo_packet_reg;
struct mdp4_overlay_pipe *pipe;
int ret;
if (mfd->key != MFD_KEY)
return;
/* FUJITSU:2012-05-29 DISP add prevent set_lut interruption start */
mutex_lock(&msm_fb_ioctl_lut_sem);
/* FUJITSU:2012-05-29 DISP add prevent set_lut interruption end */
mddi_mfd = mfd; /* keep it */
/* FUJITSU:2011-12-22 add sandstorm blocker --> */
#if defined(CONFIG_FB_MSM_MDDI) /* if update RAM image size is full screen size */
if (iBuf->dma_h == mfd->panel_info.yres)
{
mddi_panel_fullscrn_update_notify();
}
#endif
/* FUJITSU:2011-12-22 add sandstorm blocker <-- */
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
if (mddi_pipe == NULL) {
ptype = mdp4_overlay_format2type(mfd->fb_imgType);
if (ptype < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
if (pipe == NULL)
printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
pipe->pipe_used++;
pipe->mixer_num = MDP4_MIXER0;
pipe->src_format = mfd->fb_imgType;
mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_MDDI);
ret = mdp4_overlay_format2pipe(pipe);
if (ret < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
mddi_pipe = pipe; /* keep it */
mddi_pipe->blt_end = 1; /* mark as end */
mddi_ld_param = 0;
mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
if (mdp_hw_revision == MDP4_REVISION_V2_1) {
uint32 data;
data = inpdw(MDP_BASE + 0x0028);
data &= ~0x0300; /* bit 8, 9, MASTER4 */
if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
data |= 0x0200;
else
data |= 0x0100;
MDP_OUTP(MDP_BASE + 0x00028, data);
}
if (mfd->panel_info.type == MDDI_PANEL) {
if (mfd->panel_info.pdest == DISPLAY_1)
mddi_ld_param = 0;
else
mddi_ld_param = 1;
} else {
mddi_ld_param = 2;
}
MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
if (mfd->panel_info.bpp == 24)
MDP_OUTP(MDP_BASE + 0x00094,
(MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
else if (mfd->panel_info.bpp == 16)
MDP_OUTP(MDP_BASE + 0x00094,
(MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
else
MDP_OUTP(MDP_BASE + 0x00094,
(MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
MDP_OUTP(MDP_BASE + 0x00098, 0x01);
} else {
pipe = mddi_pipe;
}
/* 0 for dma_p, client_id = 0 */
MDP_OUTP(MDP_BASE + 0x00090, 0);
src = (uint8 *) iBuf->buf;
#ifdef WHOLESCREEN
{
struct fb_info *fbi;
fbi = mfd->fbi;
pipe->src_height = fbi->var.yres;
pipe->src_width = fbi->var.xres;
pipe->src_h = fbi->var.yres;
pipe->src_w = fbi->var.xres;
pipe->src_y = 0;
pipe->src_x = 0;
pipe->dst_h = fbi->var.yres;
pipe->dst_w = fbi->var.xres;
pipe->dst_y = 0;
pipe->dst_x = 0;
pipe->srcp0_addr = (uint32)src;
pipe->srcp0_ystride = fbi->fix.line_length;
}
#else
if (mdp4_overlay_active(MDP4_MIXER0)) {
struct fb_info *fbi;
fbi = mfd->fbi;
pipe->src_height = fbi->var.yres;
pipe->src_width = fbi->var.xres;
pipe->src_h = fbi->var.yres;
pipe->src_w = fbi->var.xres;
pipe->src_y = 0;
pipe->src_x = 0;
pipe->dst_h = fbi->var.yres;
pipe->dst_w = fbi->var.xres;
pipe->dst_y = 0;
pipe->dst_x = 0;
pipe->srcp0_addr = (uint32) src;
pipe->srcp0_ystride = fbi->fix.line_length;
} else {
/* starting input address */
src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width)
* iBuf->bpp;
pipe->src_height = iBuf->dma_h;
pipe->src_width = iBuf->dma_w;
pipe->src_h = iBuf->dma_h;
pipe->src_w = iBuf->dma_w;
pipe->src_y = 0;
pipe->src_x = 0;
pipe->dst_h = iBuf->dma_h;
pipe->dst_w = iBuf->dma_w;
pipe->dst_y = iBuf->dma_y;
pipe->dst_x = iBuf->dma_x;
pipe->srcp0_addr = (uint32) src;
pipe->srcp0_ystride = iBuf->ibuf_width * iBuf->bpp;
}
#endif
pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
mdp4_overlay_rgb_setup(pipe);
mdp4_mixer_stage_up(pipe);
mdp4_overlayproc_cfg(pipe);
mdp4_overlay_dmap_xy(pipe);
mdp4_overlay_dmap_cfg(mfd, 0);
mdp4_mddi_vsync_enable(mfd, pipe, 0);
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
int mdp4_mddi_overlay_blt_offset(int *off)
{
if (mdp_hw_revision < MDP4_REVISION_V2_1) { /* need dmas dmap switch */
if (mddi_pipe->blt_end ||
(mdp4_overlay_mixer_play(mddi_pipe->mixer_num) == 0)) {
*off = -1;
return -EINVAL;
}
} else {
/* no dmas dmap switch */
if (mddi_pipe->blt_end) {
*off = -1;
return -EINVAL;
}
}
if (mddi_pipe->blt_cnt & 0x01)
*off = mddi_pipe->src_height * mddi_pipe->src_width * 3;
else
*off = 0;
return 0;
}
void mdp4_mddi_overlay_blt(ulong addr)
{
unsigned long flag;
spin_lock_irqsave(&mdp_spin_lock, flag);
if (addr) {
mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_intr_mask |= INTR_DMA_P_DONE;
outp32(MDP_INTR_ENABLE, mdp_intr_mask);
mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mddi_pipe->blt_cnt = 0;
mddi_pipe->blt_end = 0;
mddi_pipe->blt_addr = addr;
} else {
mddi_pipe->blt_end = 1; /* mark as end */
}
spin_unlock_irqrestore(&mdp_spin_lock, flag);
}
void mdp4_blt_xy_update(struct mdp4_overlay_pipe *pipe)
{
uint32 off, addr;
int bpp;
char *overlay_base;
if (pipe->blt_addr == 0)
return;
#ifdef BLT_RGB565
bpp = 2; /* overlay output is RGB565 */
#else
bpp = 3; /* overlay output is RGB888 */
#endif
off = 0;
if (pipe->dmap_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
addr = pipe->blt_addr + off;
/* dmap */
MDP_OUTP(MDP_BASE + 0x90008, addr);
/* overlay 0 */
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
outpdw(overlay_base + 0x000c, addr);
outpdw(overlay_base + 0x001c, addr);
}
/*
* mdp4_dma_p_done_mddi: called from isr
*/
void mdp4_dma_p_done_mddi(void)
{
if (mddi_pipe->blt_end) {
mddi_pipe->blt_addr = 0;
mdp_intr_mask &= ~INTR_DMA_P_DONE;
outp32(MDP_INTR_ENABLE, mdp_intr_mask);
mdp4_overlayproc_cfg(mddi_pipe);
mdp4_overlay_dmap_xy(mddi_pipe);
}
/*
* single buffer, no need to increase
* mddi_pipe->dmap_cnt here
*/
}
/*
* mdp4_overlay0_done_mddi: called from isr
*/
void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma)
{
mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
dma->busy = FALSE;
/* FUJITSU:2012-05-29 DISP add prevent set_lut interruption start */
mutex_unlock(&msm_fb_ioctl_lut_sem);
/* FUJITSU:2012-05-29 DISP add prevent set_lut interruption end */
complete(&dma->comp);
mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK,
MDP_BLOCK_POWER_OFF, TRUE);
if (busy_wait_cnt)
busy_wait_cnt--;
pr_debug("%s: ISR-done\n", __func__);
if (mddi_pipe->blt_addr) {
if (mddi_pipe->blt_cnt == 0) {
mdp4_overlayproc_cfg(mddi_pipe);
mdp4_overlay_dmap_xy(mddi_pipe);
mddi_pipe->ov_cnt = 0;
mddi_pipe->dmap_cnt = 0;
/* BLT start from next frame */
} else {
mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON,
FALSE);
mdp4_blt_xy_update(mddi_pipe);
outpdw(MDP_BASE + 0x000c, 0x0); /* start DMAP */
}
mddi_pipe->blt_cnt++;
mddi_pipe->ov_cnt++;
}
}
void mdp4_mddi_overlay_restore(void)
{
if (mddi_mfd == NULL)
return;
pr_debug("%s: resotre, pid=%d\n", __func__, current->pid);
if (mddi_mfd->panel_power_on == 0)
return;
if (mddi_mfd && mddi_pipe) {
mdp4_mddi_dma_busy_wait(mddi_mfd);
mdp4_overlay_update_lcd(mddi_mfd);
mdp4_mddi_overlay_kickoff(mddi_mfd, mddi_pipe);
mddi_mfd->dma_update_flag = 1;
}
if (mdp_hw_revision < MDP4_REVISION_V2_1) /* need dmas dmap switch */
mdp4_mddi_overlay_dmas_restore();
}
/*
* mdp4_mddi_cmd_dma_busy_wait: check mddi link activity
* dsi link is a shared resource and it can only be used
* while it is in idle state.
* ov_mutex need to be acquired before call this function.
*/
void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd)
{
unsigned long flag;
int need_wait = 0;
pr_debug("%s: START, pid=%d\n", __func__, current->pid);
spin_lock_irqsave(&mdp_spin_lock, flag);
if (mfd->dma->busy == TRUE) {
if (busy_wait_cnt == 0)
INIT_COMPLETION(mfd->dma->comp);
busy_wait_cnt++;
need_wait++;
}
spin_unlock_irqrestore(&mdp_spin_lock, flag);
if (need_wait) {
/* wait until DMA finishes the current job */
pr_debug("%s: PENDING, pid=%d\n", __func__, current->pid);
wait_for_completion(&mfd->dma->comp);
}
pr_debug("%s: DONE, pid=%d\n", __func__, current->pid);
}
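/*
 * Minimal usage sketch, mirroring mdp4_mddi_overlay() further below: the
 * caller must hold ov_mutex before waiting for the DMA engine and kicking
 * off a new frame.
 *
 *	mutex_lock(&mfd->dma->ov_mutex);
 *	mdp4_mddi_dma_busy_wait(mfd);
 *	mdp4_overlay_update_lcd(mfd);
 *	mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
 *	mutex_unlock(&mfd->dma->ov_mutex);
 */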
void mdp4_mddi_kickoff_video(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
pr_debug("%s: pid=%d\n", __func__, current->pid);
mdp4_mddi_overlay_kickoff(mfd, pipe);
}
void mdp4_mddi_kickoff_ui(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
pr_debug("%s: pid=%d\n", __func__, current->pid);
mdp4_mddi_overlay_kickoff(mfd, pipe);
}
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
/* change mdp clk while mdp is idle */
mdp4_set_perf_level();
if (mdp_hw_revision == MDP4_REVISION_V2_1) {
if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_UNSET)) {
uint32 data;
data = inpdw(MDP_BASE + 0x0028);
data &= ~0x0300; /* bit 8, 9, MASTER4 */
if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
data |= 0x0200;
else
data |= 0x0100;
MDP_OUTP(MDP_BASE + 0x00028, data);
mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_UNSET,
false);
}
if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_SET)) {
uint32 data;
data = inpdw(MDP_BASE + 0x0028);
data &= ~0x0300; /* bit 8, 9, MASTER4 */
MDP_OUTP(MDP_BASE + 0x00028, data);
mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET, false);
}
}
mdp_enable_irq(MDP_OVERLAY0_TERM);
mfd->dma->busy = TRUE;
/* start OVERLAY pipe */
mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
mdp4_stat.kickoff_ov0++;
}
void mdp4_dma_s_update_lcd(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
MDPIBUF *iBuf = &mfd->ibuf;
uint32 outBpp = iBuf->bpp;
uint16 mddi_vdo_packet_reg;
uint32 dma_s_cfg_reg;
dma_s_cfg_reg = 0;
if (mfd->fb_imgType == MDP_RGBA_8888)
dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR; /* on purpose */
else if (mfd->fb_imgType == MDP_BGR_565)
dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR;
else
dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB;
if (outBpp == 4)
dma_s_cfg_reg |= (1 << 26); /* xRGB8888 */
else if (outBpp == 2)
dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
dma_s_cfg_reg |= DMA_DITHER_EN;
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* PIXELSIZE */
MDP_OUTP(MDP_BASE + 0xa0004, (pipe->dst_h << 16 | pipe->dst_w));
MDP_OUTP(MDP_BASE + 0xa0008, pipe->srcp0_addr); /* ibuf address */
MDP_OUTP(MDP_BASE + 0xa000c, pipe->srcp0_ystride);/* ystride */
if (mfd->panel_info.bpp == 24) {
dma_s_cfg_reg |= DMA_DSTC0G_8BITS | /* 666 18BPP */
DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
} else if (mfd->panel_info.bpp == 18) {
dma_s_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
} else {
dma_s_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
}
MDP_OUTP(MDP_BASE + 0xa0010, (pipe->dst_y << 16) | pipe->dst_x);
/* 1 for dma_s, client_id = 0 */
MDP_OUTP(MDP_BASE + 0x00090, 1);
mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
if (mfd->panel_info.bpp == 24)
MDP_OUTP(MDP_BASE + 0x00094,
(MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
else if (mfd->panel_info.bpp == 16)
MDP_OUTP(MDP_BASE + 0x00094,
(MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
else
MDP_OUTP(MDP_BASE + 0x00094,
(MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
MDP_OUTP(MDP_BASE + 0x00098, 0x01);
MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg);
mdp4_mddi_vsync_enable(mfd, pipe, 1);
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
/* change mdp clk while mdp is idle */
mdp4_set_perf_level();
mdp_enable_irq(MDP_DMA_S_TERM);
mfd->dma->busy = TRUE;
mfd->ibuf_flushed = TRUE;
/* start dma_s pipe */
mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
mdp4_stat.kickoff_dmas++;
/* wait until DMA finishes the current job */
wait_for_completion(&mfd->dma->comp);
mdp_disable_irq(MDP_DMA_S_TERM);
}
void mdp4_mddi_overlay_dmas_restore(void)
{
/* mutex held by caller */
if (mddi_mfd && mddi_pipe) {
mdp4_mddi_dma_busy_wait(mddi_mfd);
mdp4_dma_s_update_lcd(mddi_mfd, mddi_pipe);
mdp4_mddi_dma_s_kickoff(mddi_mfd, mddi_pipe);
mddi_mfd->dma_update_flag = 1;
}
}
void mdp4_mddi_overlay(struct msm_fb_data_type *mfd)
{
mutex_lock(&mfd->dma->ov_mutex);
if (mfd && mfd->panel_power_on) {
mdp4_mddi_dma_busy_wait(mfd);
mdp4_overlay_update_lcd(mfd);
if (mdp_hw_revision < MDP4_REVISION_V2_1) {
/* dmas dmap switch */
if (mdp4_overlay_mixer_play(mddi_pipe->mixer_num)
== 0) {
mdp4_dma_s_update_lcd(mfd, mddi_pipe);
mdp4_mddi_dma_s_kickoff(mfd, mddi_pipe);
} else
mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
} else /* no dmas dmap switch */
mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
/* signal if pan function is waiting for the update completion */
if (mfd->pan_waiting) {
mfd->pan_waiting = FALSE;
complete(&mfd->pan_comp);
}
}
mutex_unlock(&mfd->dma->ov_mutex);
}
int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct msm_fb_data_type *mfd = info->par;
mutex_lock(&mfd->dma->ov_mutex);
if (mfd && mfd->panel_power_on) {
mdp4_mddi_dma_busy_wait(mfd);
mdp_hw_cursor_update(info, cursor);
}
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
| hiikezoe/android_kernel_fujitsu_f12nad | drivers/video/msm/mdp4_overlay_mddi.c | C | gpl-2.0 | 16,555 |
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
// prototype declarations
char** split(char *str, char *delimitators);
int main(void)
{
char** result;
char str[] = "1-0:0.0.1(132412515)\n1-0:0.2.3.4(654236517)\n";
result = split(str, "()\n");
for(size_t index = 0; *(result + index); index++)
{
printf("%s\n",*(result + index));
}
free(result);
return 0;
}
char** split(char *str, char* delimitators)
{
int count = 0;
size_t index = 0;
char **parse;
char *tmp = str;
char *token;
char *last_delimitator = NULL; /* stays NULL if no delimiter is found */
while(*tmp)
{
/******************************************
If *tmp contains one of the delimiters,
strchr returns the address of that
delimiter inside the set; otherwise it
returns NULL.
*******************************************/
if(strchr(delimitators, *tmp) != NULL)
{
count++;
last_delimitator = tmp;
}
tmp++;
}
//Space for data located after the last delimiter (or for the whole
//string when it contains no delimiter at all)
count += (last_delimitator == NULL) || (last_delimitator < (str + strlen(str) - 1));
//Space for the terminating NULL pointer of the array
count++;
parse = malloc(sizeof(char*) * count);
token = strtok(str, delimitators);
while(token != NULL)
{
*(parse + index++) = token;
token = strtok(NULL, delimitators);
}
*(parse + index) = 0;
return parse;
}
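/*
 * Expected behaviour for the input used in main() above: splitting
 * "1-0:0.0.1(132412515)\n1-0:0.2.3.4(654236517)\n" on "()\n" prints the
 * four tokens
 *
 *	1-0:0.0.1
 *	132412515
 *	1-0:0.2.3.4
 *	654236517
 */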
| marcelodavid/c_utilities | string/split.c | C | gpl-2.0 | 1,269 |
/*
* ALSA SoC Voice Codec Interface for TI DAVINCI processor
*
* Copyright (C) 2010 Texas Instruments.
*
* Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/davinci_voicecodec.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include "davinci-pcm.h"
#include "davinci-i2s.h"
#include "davinci-vcif.h"
#define MOD_REG_BIT(val, mask, set) do { \
if (set) { \
val |= mask; \
} else { \
val &= ~mask; \
} \
} while (0)
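/*
 * Usage sketch (the read-modify-write pattern used throughout this file):
 * pass a non-zero 'set' to set the mask bits, zero to clear them.
 *
 *	w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
 *	MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 1);	// set RSTDAC
 *	MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 0);	// clear RSTDAC
 *	writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
 */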
struct davinci_vcif_dev {
struct davinci_vc *davinci_vc;
struct davinci_pcm_dma_params dma_params[2];
};
static void davinci_vcif_start(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct davinci_vcif_dev *davinci_vcif_dev =
rtd->dai->cpu_dai->private_data;
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
u32 w;
/* Start the sample generator and enable transmitter/receiver */
w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 0);
else
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 0);
writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
}
static void davinci_vcif_stop(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct davinci_vcif_dev *davinci_vcif_dev =
rtd->dai->cpu_dai->private_data;
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
u32 w;
/* Reset transmitter/receiver and sample rate/frame sync generators */
w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 1);
else
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 1);
writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
}
static int davinci_vcif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct davinci_vcif_dev *davinci_vcif_dev = dai->private_data;
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
struct davinci_pcm_dma_params *dma_params =
&davinci_vcif_dev->dma_params[substream->stream];
u32 w;
/* Restart the codec before setup */
davinci_vcif_stop(substream);
davinci_vcif_start(substream);
/* General line settings */
writel(DAVINCI_VC_CTRL_MASK, davinci_vc->base + DAVINCI_VC_CTRL);
writel(DAVINCI_VC_INT_MASK, davinci_vc->base + DAVINCI_VC_INTCLR);
writel(DAVINCI_VC_INT_MASK, davinci_vc->base + DAVINCI_VC_INTEN);
w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
/* Determine xfer data type */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U8:
dma_params->data_type = 0;
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_RD_UNSIGNED |
DAVINCI_VC_CTRL_WD_BITS_8 |
DAVINCI_VC_CTRL_WD_UNSIGNED, 1);
break;
case SNDRV_PCM_FORMAT_S8:
dma_params->data_type = 1;
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_WD_BITS_8, 1);
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_UNSIGNED |
DAVINCI_VC_CTRL_WD_UNSIGNED, 0);
break;
case SNDRV_PCM_FORMAT_S16_LE:
dma_params->data_type = 2;
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_RD_UNSIGNED |
DAVINCI_VC_CTRL_WD_BITS_8 |
DAVINCI_VC_CTRL_WD_UNSIGNED, 0);
break;
default:
printk(KERN_WARNING "davinci-vcif: unsupported PCM format");
return -EINVAL;
}
dma_params->acnt = dma_params->data_type;
writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
return 0;
}
static int davinci_vcif_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
int ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
davinci_vcif_start(substream);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
davinci_vcif_stop(substream);
break;
default:
ret = -EINVAL;
}
return ret;
}
#define DAVINCI_VCIF_RATES SNDRV_PCM_RATE_8000_48000
static struct snd_soc_dai_ops davinci_vcif_dai_ops = {
.trigger = davinci_vcif_trigger,
.hw_params = davinci_vcif_hw_params,
};
struct snd_soc_dai davinci_vcif_dai = {
.name = "davinci-vcif",
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = DAVINCI_VCIF_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = DAVINCI_VCIF_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,},
.ops = &davinci_vcif_dai_ops,
};
EXPORT_SYMBOL_GPL(davinci_vcif_dai);
static int davinci_vcif_probe(struct platform_device *pdev)
{
struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
struct davinci_vcif_dev *davinci_vcif_dev;
int ret;
davinci_vcif_dev = kzalloc(sizeof(struct davinci_vcif_dev), GFP_KERNEL);
if (!davinci_vcif_dev) {
dev_dbg(&pdev->dev,
"could not allocate memory for private data\n");
return -ENOMEM;
}
/* DMA tx params */
davinci_vcif_dev->davinci_vc = davinci_vc;
davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel =
davinci_vc->davinci_vcif.dma_tx_channel;
davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr =
davinci_vc->davinci_vcif.dma_tx_addr;
/* DMA rx params */
davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel =
davinci_vc->davinci_vcif.dma_rx_channel;
davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr =
davinci_vc->davinci_vcif.dma_rx_addr;
davinci_vcif_dai.dev = &pdev->dev;
davinci_vcif_dai.capture.dma_data = davinci_vcif_dev->dma_params;
davinci_vcif_dai.playback.dma_data = davinci_vcif_dev->dma_params;
davinci_vcif_dai.private_data = davinci_vcif_dev;
ret = snd_soc_register_dai(&davinci_vcif_dai);
if (ret != 0) {
dev_err(&pdev->dev, "could not register dai\n");
goto fail;
}
return 0;
fail:
kfree(davinci_vcif_dev);
return ret;
}
static int davinci_vcif_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&davinci_vcif_dai);
return 0;
}
static struct platform_driver davinci_vcif_driver = {
.probe = davinci_vcif_probe,
.remove = davinci_vcif_remove,
.driver = {
.name = "davinci_vcif",
.owner = THIS_MODULE,
},
};
static int __init davinci_vcif_init(void)
{
return platform_driver_probe(&davinci_vcif_driver, davinci_vcif_probe);
}
module_init(davinci_vcif_init);
static void __exit davinci_vcif_exit(void)
{
platform_driver_unregister(&davinci_vcif_driver);
}
module_exit(davinci_vcif_exit);
MODULE_AUTHOR("Miguel Aguilar");
MODULE_DESCRIPTION("Texas Instruments DaVinci ASoC Voice Codec Interface");
MODULE_LICENSE("GPL");
| bticino/linux | sound/soc/davinci/davinci-vcif.c | C | gpl-2.0 | 7,528 |
/* MI Command Set - disassemble commands.
Copyright (C) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc.
Contributed by Cygnus Solutions (a Red Hat company).
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "defs.h"
#include "target.h"
#include "value.h"
#include "mi-cmds.h"
#include "mi-getopt.h"
#include "gdb_string.h"
#include "ui-out.h"
#include "disasm.h"
/* The arguments to be passed on the command line and parsed here are:
either:
START-ADDRESS: address to start the disassembly at.
END-ADDRESS: address to end the disassembly at.
or:
FILENAME: The name of the file where we want disassemble from.
LINE: The line around which we want to disassemble. It will
disassemble the function that contains that line.
HOW_MANY: Number of disassembly lines to display. In mixed mode, it
is the number of disassembly lines only, not counting the source
lines.
always required:
MODE: 0 or 1 for disassembly only, or mixed source and disassembly,
respectively. */
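/* Illustrative invocations (not exhaustive), matching the argument
   combinations accepted below:
     -data-disassemble -s $pc -e "$pc + 20" -- 0
     -data-disassemble -f basics.c -l 32 -n 3 -- 1
   The first form uses a start/end address pair, the second a file/line
   pair with an optional instruction count. */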
void
mi_cmd_disassemble (char *command, char **argv, int argc)
{
CORE_ADDR start;
int mixed_source_and_assembly;
struct symtab *s;
/* Which options have we processed ... */
int file_seen = 0;
int line_seen = 0;
int num_seen = 0;
int start_seen = 0;
int end_seen = 0;
/* ... and their corresponding value. */
char *file_string = NULL;
int line_num = -1;
int how_many = -1;
CORE_ADDR low = 0;
CORE_ADDR high = 0;
/* Options processing stuff. */
int optind = 0;
char *optarg;
enum opt
{
FILE_OPT, LINE_OPT, NUM_OPT, START_OPT, END_OPT
};
static struct mi_opt opts[] = {
{"f", FILE_OPT, 1},
{"l", LINE_OPT, 1},
{"n", NUM_OPT, 1},
{"s", START_OPT, 1},
{"e", END_OPT, 1},
{ 0, 0, 0 }
};
/* Get the options with their arguments. Keep track of what we
encountered. */
while (1)
{
int opt = mi_getopt ("mi_cmd_disassemble", argc, argv, opts,
&optind, &optarg);
if (opt < 0)
break;
switch ((enum opt) opt)
{
case FILE_OPT:
file_string = xstrdup (optarg);
file_seen = 1;
break;
case LINE_OPT:
line_num = atoi (optarg);
line_seen = 1;
break;
case NUM_OPT:
how_many = atoi (optarg);
num_seen = 1;
break;
case START_OPT:
low = parse_and_eval_address (optarg);
start_seen = 1;
break;
case END_OPT:
high = parse_and_eval_address (optarg);
end_seen = 1;
break;
}
}
argv += optind;
argc -= optind;
/* Allow only filename + linenum (with how_many which is not
required) OR start_addr + end_addr */
if (!((line_seen && file_seen && num_seen && !start_seen && !end_seen)
|| (line_seen && file_seen && !num_seen && !start_seen && !end_seen)
|| (!line_seen && !file_seen && !num_seen && start_seen && end_seen)))
error
("mi_cmd_disassemble: Usage: ( [-f filename -l linenum [-n howmany]] | [-s startaddr -e endaddr]) [--] mixed_mode.");
if (argc != 1)
error
("mi_cmd_disassemble: Usage: [-f filename -l linenum [-n howmany]] [-s startaddr -e endaddr] [--] mixed_mode.");
mixed_source_and_assembly = atoi (argv[0]);
if ((mixed_source_and_assembly != 0) && (mixed_source_and_assembly != 1))
error (_("mi_cmd_disassemble: Mixed_mode argument must be 0 or 1."));
/* We must get the function beginning and end where line_num is
contained. */
if (line_seen && file_seen)
{
s = lookup_symtab (file_string);
if (s == NULL)
error (_("mi_cmd_disassemble: Invalid filename."));
if (!find_line_pc (s, line_num, &start))
error (_("mi_cmd_disassemble: Invalid line number"));
if (find_pc_partial_function (start, NULL, &low, &high) == 0)
error (_("mi_cmd_disassemble: No function contains specified address"));
}
gdb_disassembly (uiout,
file_string,
line_num,
mixed_source_and_assembly, how_many, low, high);
}
| sergiodj/gdb-sergio | gdb/mi/mi-cmd-disas.c | C | gpl-2.0 | 4,527 |
/**
******************************************************************************
* @file TIM/TIM_TimeBase/Src/main.c
* @author MCD Application Team
* @version V1.2.3
* @date 09-October-2015
* @brief This sample code shows how to use STM32F4xx TIM HAL API to generate
* a time base.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT(c) 2015 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "main.h"
/** @addtogroup STM32F4xx_HAL_Examples
* @{
*/
/** @addtogroup TIM_TimeBase
* @{
*/
/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Timer handler declaration */
TIM_HandleTypeDef TimHandle;
uint32_t uwPrescalerValue = 0;
/* Private function prototypes -----------------------------------------------*/
static void SystemClock_Config(void);
static void Error_Handler(void);
/* Private functions ---------------------------------------------------------*/
/**
* @brief Main program
* @param None
* @retval None
*/
int main(void)
{
/* STM32F4xx HAL library initialization:
- Configure the Flash prefetch, instruction and Data caches
- Configure the Systick to generate an interrupt each 1 msec
- Set NVIC Group Priority to 4
- Global MSP (MCU Support Package) initialization
*/
HAL_Init();
/* Configure the system clock to 168 MHz */
SystemClock_Config();
/* Configure LED1 and LED3 */
BSP_LED_Init(LED1);
BSP_LED_Init(LED3);
/*##-1- Configure the TIM peripheral #######################################*/
/* -----------------------------------------------------------------------
In this example TIM3 input clock (TIM3CLK) is set to 2 * APB1 clock (PCLK1),
since APB1 prescaler is different from 1.
TIM3CLK = 2 * PCLK1
PCLK1 = HCLK / 4
=> TIM3CLK = HCLK / 2 = SystemCoreClock /2
To get TIM3 counter clock at 10 KHz, the Prescaler is computed as follows:
Prescaler = (TIM3CLK / TIM3 counter clock) - 1
Prescaler = ((SystemCoreClock /2) /10 KHz) - 1
Note:
SystemCoreClock variable holds HCLK frequency and is defined in system_stm32f4xx.c file.
Each time the core clock (HCLK) changes, user had to update SystemCoreClock
variable value. Otherwise, any configuration based on this variable will be incorrect.
This variable is updated in three ways:
1) by calling CMSIS function SystemCoreClockUpdate()
2) by calling HAL API function HAL_RCC_GetSysClockFreq()
3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency
----------------------------------------------------------------------- */
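/* Worked example with the 168 MHz system clock configured below:
   TIM3CLK = HCLK / 2 = 84 MHz, so
   uwPrescalerValue = (84 000 000 / 10 000) - 1 = 8399,
   giving a 10 KHz counter clock. */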
/* Compute the prescaler value to have TIM3 counter clock equal to 10 KHz */
uwPrescalerValue = (uint32_t) ((SystemCoreClock /2) / 10000) - 1;
/* Set TIMx instance */
TimHandle.Instance = TIMx;
/* Initialize TIM3 peripheral as follow:
+ Period = 10000 - 1
+ Prescaler = ((SystemCoreClock/2)/10000) - 1
+ ClockDivision = 0
+ Counter direction = Up
*/
TimHandle.Init.Period = 10000 - 1;
TimHandle.Init.Prescaler = uwPrescalerValue;
TimHandle.Init.ClockDivision = 0;
TimHandle.Init.CounterMode = TIM_COUNTERMODE_UP;
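/* With a 10 KHz counter clock and Period = 10000 - 1, an update event
   (and hence HAL_TIM_PeriodElapsedCallback, which toggles LED1) occurs
   once per second. */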
if(HAL_TIM_Base_Init(&TimHandle) != HAL_OK)
{
/* Initialization Error */
Error_Handler();
}
/*##-2- Start the TIM Base generation in interrupt mode ####################*/
/* Start Channel1 */
if(HAL_TIM_Base_Start_IT(&TimHandle) != HAL_OK)
{
/* Starting Error */
Error_Handler();
}
/* Infinite loop */
while (1)
{
}
}
/**
* @brief Period elapsed callback in non blocking mode
* @param htim : TIM handle
* @retval None
*/
void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim)
{
BSP_LED_Toggle(LED1);
}
/**
* @brief This function is executed in case of error occurrence.
* @param None
* @retval None
*/
static void Error_Handler(void)
{
/* Turn LED3 on */
BSP_LED_On(LED3);
while(1)
{
}
}
/**
* @brief System Clock Configuration
* The system Clock is configured as follow :
* System Clock source = PLL (HSE)
* SYSCLK(Hz) = 168000000
* HCLK(Hz) = 168000000
* AHB Prescaler = 1
* APB1 Prescaler = 4
* APB2 Prescaler = 2
* HSE Frequency(Hz) = 25000000
* PLL_M = 25
* PLL_N = 336
* PLL_P = 2
* PLL_Q = 7
* VDD(V) = 3.3
* Main regulator output voltage = Scale1 mode
* Flash Latency(WS) = 5
* @param None
* @retval None
*/
static void SystemClock_Config(void)
{
RCC_ClkInitTypeDef RCC_ClkInitStruct;
RCC_OscInitTypeDef RCC_OscInitStruct;
/* Enable Power Control clock */
__HAL_RCC_PWR_CLK_ENABLE();
/* The voltage scaling allows optimizing the power consumption when the device is
clocked below the maximum system frequency, to update the voltage scaling value
regarding system frequency refer to product datasheet. */
__HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE1);
/* Enable HSE Oscillator and activate PLL with HSE as source */
RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSE;
RCC_OscInitStruct.HSEState = RCC_HSE_ON;
RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON;
RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSE;
RCC_OscInitStruct.PLL.PLLM = 25;
RCC_OscInitStruct.PLL.PLLN = 336;
RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2;
RCC_OscInitStruct.PLL.PLLQ = 7;
HAL_RCC_OscConfig(&RCC_OscInitStruct);
/* Select PLL as system clock source and configure the HCLK, PCLK1 and PCLK2
clocks dividers */
RCC_ClkInitStruct.ClockType = (RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2);
RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK;
RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1;
RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV4;
RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV2;
HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_5);
/* STM32F405x/407x/415x/417x Revision Z devices: prefetch is supported */
if (HAL_GetREVID() == 0x1001)
{
/* Enable the Flash prefetch */
__HAL_FLASH_PREFETCH_BUFFER_ENABLE();
}
}
#ifdef USE_FULL_ASSERT
/**
* @brief Reports the name of the source file and the source line number
* where the assert_param error has occurred.
* @param file: pointer to the source file name
* @param line: assert_param error line source number
* @retval None
*/
void assert_failed(uint8_t* file, uint32_t line)
{
/* User can add his own implementation to report the file name and line number,
ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
/* Infinite loop */
while (1)
{
}
}
#endif
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| kamfor/STMF4 | STM/Projects/STM324xG_EVAL/Examples/TIM/TIM_TimeBase/Src/main.c | C | gpl-2.0 | 9,525 |
/* SPDX-License-Identifier: LGPL-2.1+ */
/***
This file is part of systemd.
Copyright 2010 Lennart Poettering
systemd is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
systemd is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <errno.h>
#include <sys/epoll.h>
#include <sys/stat.h>
#include <unistd.h>
#include "libudev.h"
#include "alloc-util.h"
#include "dbus-swap.h"
#include "escape.h"
#include "exit-status.h"
#include "fd-util.h"
#include "format-util.h"
#include "fstab-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "special.h"
#include "string-table.h"
#include "string-util.h"
#include "swap.h"
#include "udev-util.h"
#include "unit-name.h"
#include "unit.h"
#include "virt.h"
static const UnitActiveState state_translation_table[_SWAP_STATE_MAX] = {
[SWAP_DEAD] = UNIT_INACTIVE,
[SWAP_ACTIVATING] = UNIT_ACTIVATING,
[SWAP_ACTIVATING_DONE] = UNIT_ACTIVE,
[SWAP_ACTIVE] = UNIT_ACTIVE,
[SWAP_DEACTIVATING] = UNIT_DEACTIVATING,
[SWAP_DEACTIVATING_SIGTERM] = UNIT_DEACTIVATING,
[SWAP_DEACTIVATING_SIGKILL] = UNIT_DEACTIVATING,
[SWAP_FAILED] = UNIT_FAILED
};
static int swap_dispatch_timer(sd_event_source *source, usec_t usec, void *userdata);
static int swap_dispatch_io(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static bool SWAP_STATE_WITH_PROCESS(SwapState state) {
return IN_SET(state,
SWAP_ACTIVATING,
SWAP_ACTIVATING_DONE,
SWAP_DEACTIVATING,
SWAP_DEACTIVATING_SIGTERM,
SWAP_DEACTIVATING_SIGKILL);
}
static void swap_unset_proc_swaps(Swap *s) {
assert(s);
if (!s->from_proc_swaps)
return;
s->parameters_proc_swaps.what = mfree(s->parameters_proc_swaps.what);
s->from_proc_swaps = false;
}
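/* swaps_by_devnode maps a device node to the head of the per-node "same_devnode"
 * list of Swap units; swap_set_devnode() below keeps the hashmap entry pointing at
 * the current list head as units are added to or removed from that list. */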
static int swap_set_devnode(Swap *s, const char *devnode) {
Hashmap *swaps;
Swap *first;
int r;
assert(s);
r = hashmap_ensure_allocated(&UNIT(s)->manager->swaps_by_devnode, &path_hash_ops);
if (r < 0)
return r;
swaps = UNIT(s)->manager->swaps_by_devnode;
if (s->devnode) {
first = hashmap_get(swaps, s->devnode);
LIST_REMOVE(same_devnode, first, s);
if (first)
hashmap_replace(swaps, first->devnode, first);
else
hashmap_remove(swaps, s->devnode);
s->devnode = mfree(s->devnode);
}
if (devnode) {
s->devnode = strdup(devnode);
if (!s->devnode)
return -ENOMEM;
first = hashmap_get(swaps, s->devnode);
LIST_PREPEND(same_devnode, first, s);
return hashmap_replace(swaps, first->devnode, first);
}
return 0;
}
static void swap_init(Unit *u) {
Swap *s = SWAP(u);
assert(s);
assert(UNIT(s)->load_state == UNIT_STUB);
s->timeout_usec = u->manager->default_timeout_start_usec;
s->exec_context.std_output = u->manager->default_std_output;
s->exec_context.std_error = u->manager->default_std_error;
s->parameters_proc_swaps.priority = s->parameters_fragment.priority = -1;
s->control_command_id = _SWAP_EXEC_COMMAND_INVALID;
u->ignore_on_isolate = true;
}
static void swap_unwatch_control_pid(Swap *s) {
assert(s);
if (s->control_pid <= 0)
return;
unit_unwatch_pid(UNIT(s), s->control_pid);
s->control_pid = 0;
}
static void swap_done(Unit *u) {
Swap *s = SWAP(u);
assert(s);
swap_unset_proc_swaps(s);
swap_set_devnode(s, NULL);
s->what = mfree(s->what);
s->parameters_fragment.what = mfree(s->parameters_fragment.what);
s->parameters_fragment.options = mfree(s->parameters_fragment.options);
s->exec_runtime = exec_runtime_unref(s->exec_runtime, false);
exec_command_done_array(s->exec_command, _SWAP_EXEC_COMMAND_MAX);
s->control_command = NULL;
dynamic_creds_unref(&s->dynamic_creds);
swap_unwatch_control_pid(s);
s->timer_event_source = sd_event_source_unref(s->timer_event_source);
}
static int swap_arm_timer(Swap *s, usec_t usec) {
int r;
assert(s);
if (s->timer_event_source) {
r = sd_event_source_set_time(s->timer_event_source, usec);
if (r < 0)
return r;
return sd_event_source_set_enabled(s->timer_event_source, SD_EVENT_ONESHOT);
}
if (usec == USEC_INFINITY)
return 0;
r = sd_event_add_time(
UNIT(s)->manager->event,
&s->timer_event_source,
CLOCK_MONOTONIC,
usec, 0,
swap_dispatch_timer, s);
if (r < 0)
return r;
(void) sd_event_source_set_description(s->timer_event_source, "swap-timer");
return 0;
}
static int swap_add_device_dependencies(Swap *s) {
assert(s);
if (!s->what)
return 0;
if (!s->from_fragment)
return 0;
if (is_device_path(s->what))
return unit_add_node_dependency(UNIT(s), s->what, MANAGER_IS_SYSTEM(UNIT(s)->manager), UNIT_BINDS_TO, UNIT_DEPENDENCY_FILE);
else
/* File-based swap devices need to be ordered after
* systemd-remount-fs.service, since they might need a
* writable file system. */
return unit_add_dependency_by_name(UNIT(s), UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
}
static int swap_add_default_dependencies(Swap *s) {
int r;
assert(s);
if (!UNIT(s)->default_dependencies)
return 0;
if (!MANAGER_IS_SYSTEM(UNIT(s)->manager))
return 0;
if (detect_container() > 0)
return 0;
/* swap units generated for the swap dev links are missing the
* ordering dep against the swap target. */
r = unit_add_dependency_by_name(UNIT(s), UNIT_BEFORE, SPECIAL_SWAP_TARGET, NULL, true, UNIT_DEPENDENCY_DEFAULT);
if (r < 0)
return r;
return unit_add_two_dependencies_by_name(UNIT(s), UNIT_BEFORE, UNIT_CONFLICTS, SPECIAL_UMOUNT_TARGET, NULL, true, UNIT_DEPENDENCY_DEFAULT);
}
static int swap_verify(Swap *s) {
_cleanup_free_ char *e = NULL;
int r;
if (UNIT(s)->load_state != UNIT_LOADED)
return 0;
r = unit_name_from_path(s->what, ".swap", &e);
if (r < 0)
return log_unit_error_errno(UNIT(s), r, "Failed to generate unit name from path: %m");
if (!unit_has_name(UNIT(s), e)) {
log_unit_error(UNIT(s), "Value of What= and unit name do not match, not loading.");
return -EINVAL;
}
if (s->exec_context.pam_name && s->kill_context.kill_mode != KILL_CONTROL_GROUP) {
log_unit_error(UNIT(s), "Unit has PAM enabled. Kill mode must be set to 'control-group'. Refusing to load.");
return -EINVAL;
}
return 0;
}
static int swap_load_devnode(Swap *s) {
_cleanup_udev_device_unref_ struct udev_device *d = NULL;
struct stat st;
const char *p;
assert(s);
if (stat(s->what, &st) < 0 || !S_ISBLK(st.st_mode))
return 0;
d = udev_device_new_from_devnum(UNIT(s)->manager->udev, 'b', st.st_rdev);
if (!d)
return 0;
p = udev_device_get_devnode(d);
if (!p)
return 0;
return swap_set_devnode(s, p);
}
static int swap_load(Unit *u) {
int r;
Swap *s = SWAP(u);
assert(s);
assert(u->load_state == UNIT_STUB);
/* Load a .swap file */
if (SWAP(u)->from_proc_swaps)
r = unit_load_fragment_and_dropin_optional(u);
else
r = unit_load_fragment_and_dropin(u);
if (r < 0)
return r;
if (u->load_state == UNIT_LOADED) {
if (UNIT(s)->fragment_path)
s->from_fragment = true;
if (!s->what) {
if (s->parameters_fragment.what)
s->what = strdup(s->parameters_fragment.what);
else if (s->parameters_proc_swaps.what)
s->what = strdup(s->parameters_proc_swaps.what);
else {
r = unit_name_to_path(u->id, &s->what);
if (r < 0)
return r;
}
if (!s->what)
return -ENOMEM;
}
path_kill_slashes(s->what);
if (!UNIT(s)->description) {
r = unit_set_description(u, s->what);
if (r < 0)
return r;
}
r = unit_require_mounts_for(UNIT(s), s->what, UNIT_DEPENDENCY_IMPLICIT);
if (r < 0)
return r;
r = swap_add_device_dependencies(s);
if (r < 0)
return r;
r = swap_load_devnode(s);
if (r < 0)
return r;
r = unit_patch_contexts(u);
if (r < 0)
return r;
r = unit_add_exec_dependencies(u, &s->exec_context);
if (r < 0)
return r;
r = unit_set_default_slice(u);
if (r < 0)
return r;
r = swap_add_default_dependencies(s);
if (r < 0)
return r;
}
return swap_verify(s);
}
static int swap_setup_unit(
Manager *m,
const char *what,
const char *what_proc_swaps,
int priority,
bool set_flags) {
_cleanup_free_ char *e = NULL;
bool delete = false;
Unit *u = NULL;
int r;
SwapParameters *p;
assert(m);
assert(what);
assert(what_proc_swaps);
r = unit_name_from_path(what, ".swap", &e);
if (r < 0)
return log_unit_error_errno(u, r, "Failed to generate unit name from path: %m");
u = manager_get_unit(m, e);
if (u &&
SWAP(u)->from_proc_swaps &&
!path_equal(SWAP(u)->parameters_proc_swaps.what, what_proc_swaps)) {
log_error("Swap %s appeared twice with different device paths %s and %s", e, SWAP(u)->parameters_proc_swaps.what, what_proc_swaps);
return -EEXIST;
}
if (!u) {
delete = true;
r = unit_new_for_name(m, sizeof(Swap), e, &u);
if (r < 0)
goto fail;
SWAP(u)->what = strdup(what);
if (!SWAP(u)->what) {
r = -ENOMEM;
goto fail;
}
unit_add_to_load_queue(u);
} else
delete = false;
p = &SWAP(u)->parameters_proc_swaps;
if (!p->what) {
p->what = strdup(what_proc_swaps);
if (!p->what) {
r = -ENOMEM;
goto fail;
}
}
if (set_flags) {
SWAP(u)->is_active = true;
SWAP(u)->just_activated = !SWAP(u)->from_proc_swaps;
}
SWAP(u)->from_proc_swaps = true;
p->priority = priority;
unit_add_to_dbus_queue(u);
return 0;
fail:
log_unit_warning_errno(u, r, "Failed to load swap unit: %m");
if (delete)
unit_free(u);
return r;
}
static int swap_process_new(Manager *m, const char *device, int prio, bool set_flags) {
_cleanup_udev_device_unref_ struct udev_device *d = NULL;
struct udev_list_entry *item = NULL, *first = NULL;
const char *dn;
struct stat st;
int r;
assert(m);
r = swap_setup_unit(m, device, device, prio, set_flags);
if (r < 0)
return r;
/* If this is a block device, then let's add duplicates for
* all other names of this block device */
if (stat(device, &st) < 0 || !S_ISBLK(st.st_mode))
return 0;
d = udev_device_new_from_devnum(m->udev, 'b', st.st_rdev);
if (!d)
return 0;
/* Add the main device node */
dn = udev_device_get_devnode(d);
if (dn && !streq(dn, device))
swap_setup_unit(m, dn, device, prio, set_flags);
/* Add additional units for all symlinks */
first = udev_device_get_devlinks_list_entry(d);
udev_list_entry_foreach(item, first) {
const char *p;
/* Don't bother with the /dev/block links */
p = udev_list_entry_get_name(item);
if (streq(p, device))
continue;
if (path_startswith(p, "/dev/block/"))
continue;
if (stat(p, &st) >= 0)
if (!S_ISBLK(st.st_mode) ||
st.st_rdev != udev_device_get_devnum(d))
continue;
swap_setup_unit(m, p, device, prio, set_flags);
}
return r;
}
static void swap_set_state(Swap *s, SwapState state) {
SwapState old_state;
Swap *other;
assert(s);
old_state = s->state;
s->state = state;
if (!SWAP_STATE_WITH_PROCESS(state)) {
s->timer_event_source = sd_event_source_unref(s->timer_event_source);
swap_unwatch_control_pid(s);
s->control_command = NULL;
s->control_command_id = _SWAP_EXEC_COMMAND_INVALID;
}
if (state != old_state)
log_unit_debug(UNIT(s), "Changed %s -> %s", swap_state_to_string(old_state), swap_state_to_string(state));
unit_notify(UNIT(s), state_translation_table[old_state], state_translation_table[state], true);
/* If other units for the same device node have a job
queued, it might be worth checking again whether it is runnable
now. This is necessary, since swap_start() refuses
operation with EAGAIN if there's already another job for
the same device node queued. */
LIST_FOREACH_OTHERS(same_devnode, other, s)
if (UNIT(other)->job)
job_add_to_run_queue(UNIT(other)->job);
}
static int swap_coldplug(Unit *u) {
Swap *s = SWAP(u);
SwapState new_state = SWAP_DEAD;
int r;
assert(s);
assert(s->state == SWAP_DEAD);
if (s->deserialized_state != s->state)
new_state = s->deserialized_state;
else if (s->from_proc_swaps)
new_state = SWAP_ACTIVE;
if (new_state == s->state)
return 0;
if (s->control_pid > 0 &&
pid_is_unwaited(s->control_pid) &&
SWAP_STATE_WITH_PROCESS(new_state)) {
r = unit_watch_pid(UNIT(s), s->control_pid);
if (r < 0)
return r;
r = swap_arm_timer(s, usec_add(u->state_change_timestamp.monotonic, s->timeout_usec));
if (r < 0)
return r;
}
if (!IN_SET(new_state, SWAP_DEAD, SWAP_FAILED)) {
(void) unit_setup_dynamic_creds(u);
(void) unit_setup_exec_runtime(u);
}
swap_set_state(s, new_state);
return 0;
}
static void swap_dump(Unit *u, FILE *f, const char *prefix) {
char buf[FORMAT_TIMESPAN_MAX];
Swap *s = SWAP(u);
SwapParameters *p;
assert(s);
assert(f);
if (s->from_proc_swaps)
p = &s->parameters_proc_swaps;
else if (s->from_fragment)
p = &s->parameters_fragment;
else
p = NULL;
fprintf(f,
"%sSwap State: %s\n"
"%sResult: %s\n"
"%sWhat: %s\n"
"%sFrom /proc/swaps: %s\n"
"%sFrom fragment: %s\n",
prefix, swap_state_to_string(s->state),
prefix, swap_result_to_string(s->result),
prefix, s->what,
prefix, yes_no(s->from_proc_swaps),
prefix, yes_no(s->from_fragment));
if (s->devnode)
fprintf(f, "%sDevice Node: %s\n", prefix, s->devnode);
if (p)
fprintf(f,
"%sPriority: %i\n"
"%sOptions: %s\n",
prefix, p->priority,
prefix, strempty(p->options));
fprintf(f,
"%sTimeoutSec: %s\n",
prefix, format_timespan(buf, sizeof(buf), s->timeout_usec, USEC_PER_SEC));
if (s->control_pid > 0)
fprintf(f,
"%sControl PID: "PID_FMT"\n",
prefix, s->control_pid);
exec_context_dump(&s->exec_context, f, prefix);
kill_context_dump(&s->kill_context, f, prefix);
cgroup_context_dump(&s->cgroup_context, f, prefix);
}
static int swap_spawn(Swap *s, ExecCommand *c, pid_t *_pid) {
ExecParameters exec_params = {
.flags = EXEC_APPLY_SANDBOXING|EXEC_APPLY_CHROOT|EXEC_APPLY_TTY_STDIN,
.stdin_fd = -1,
.stdout_fd = -1,
.stderr_fd = -1,
};
pid_t pid;
int r;
assert(s);
assert(c);
assert(_pid);
r = unit_prepare_exec(UNIT(s));
if (r < 0)
return r;
r = swap_arm_timer(s, usec_add(now(CLOCK_MONOTONIC), s->timeout_usec));
if (r < 0)
goto fail;
manager_set_exec_params(UNIT(s)->manager, &exec_params);
unit_set_exec_params(UNIT(s), &exec_params);
r = exec_spawn(UNIT(s),
c,
&s->exec_context,
&exec_params,
s->exec_runtime,
&s->dynamic_creds,
&pid);
if (r < 0)
goto fail;
r = unit_watch_pid(UNIT(s), pid);
if (r < 0)
/* FIXME: we need to do something here */
goto fail;
*_pid = pid;
return 0;
fail:
s->timer_event_source = sd_event_source_unref(s->timer_event_source);
return r;
}
static void swap_enter_dead(Swap *s, SwapResult f) {
assert(s);
if (s->result == SWAP_SUCCESS)
s->result = f;
if (s->result != SWAP_SUCCESS)
log_unit_warning(UNIT(s), "Failed with result '%s'.", swap_result_to_string(s->result));
swap_set_state(s, s->result != SWAP_SUCCESS ? SWAP_FAILED : SWAP_DEAD);
s->exec_runtime = exec_runtime_unref(s->exec_runtime, true);
exec_context_destroy_runtime_directory(&s->exec_context, UNIT(s)->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
unit_unref_uid_gid(UNIT(s), true);
dynamic_creds_destroy(&s->dynamic_creds);
}
static void swap_enter_active(Swap *s, SwapResult f) {
assert(s);
if (s->result == SWAP_SUCCESS)
s->result = f;
swap_set_state(s, SWAP_ACTIVE);
}
static void swap_enter_dead_or_active(Swap *s, SwapResult f) {
assert(s);
if (s->from_proc_swaps)
swap_enter_active(s, f);
else
swap_enter_dead(s, f);
}
static void swap_enter_signal(Swap *s, SwapState state, SwapResult f) {
int r;
KillOperation kop;
assert(s);
if (s->result == SWAP_SUCCESS)
s->result = f;
if (state == SWAP_DEACTIVATING_SIGTERM)
kop = KILL_TERMINATE;
else
kop = KILL_KILL;
r = unit_kill_context(UNIT(s), &s->kill_context, kop, -1, s->control_pid, false);
if (r < 0)
goto fail;
if (r > 0) {
r = swap_arm_timer(s, usec_add(now(CLOCK_MONOTONIC), s->timeout_usec));
if (r < 0)
goto fail;
swap_set_state(s, state);
} else if (state == SWAP_DEACTIVATING_SIGTERM && s->kill_context.send_sigkill)
swap_enter_signal(s, SWAP_DEACTIVATING_SIGKILL, SWAP_SUCCESS);
else
swap_enter_dead_or_active(s, SWAP_SUCCESS);
return;
fail:
log_unit_warning_errno(UNIT(s), r, "Failed to kill processes: %m");
swap_enter_dead_or_active(s, SWAP_FAILURE_RESOURCES);
}
static void swap_enter_activating(Swap *s) {
_cleanup_free_ char *opts = NULL;
int r;
assert(s);
unit_warn_leftover_processes(UNIT(s));
s->control_command_id = SWAP_EXEC_ACTIVATE;
s->control_command = s->exec_command + SWAP_EXEC_ACTIVATE;
if (s->from_fragment) {
int priority = -1;
r = fstab_find_pri(s->parameters_fragment.options, &priority);
if (r < 0)
log_warning_errno(r, "Failed to parse swap priority \"%s\", ignoring: %m", s->parameters_fragment.options);
else if (r == 1 && s->parameters_fragment.priority >= 0)
log_warning("Duplicate swap priority configuration by Priority and Options fields.");
if (r <= 0 && s->parameters_fragment.priority >= 0) {
if (s->parameters_fragment.options)
r = asprintf(&opts, "%s,pri=%i", s->parameters_fragment.options, s->parameters_fragment.priority);
else
r = asprintf(&opts, "pri=%i", s->parameters_fragment.priority);
if (r < 0)
goto fail;
}
}
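/* The control command assembled below has the shape (illustrative values):
 *   /sbin/swapon -o pri=10 /dev/sda2
 * where "-o <options>" is only appended when options and/or a priority were
 * configured for the unit. */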
r = exec_command_set(s->control_command, "/sbin/swapon", NULL);
if (r < 0)
goto fail;
if (s->parameters_fragment.options || opts) {
r = exec_command_append(s->control_command, "-o",
opts ? : s->parameters_fragment.options, NULL);
if (r < 0)
goto fail;
}
r = exec_command_append(s->control_command, s->what, NULL);
if (r < 0)
goto fail;
swap_unwatch_control_pid(s);
r = swap_spawn(s, s->control_command, &s->control_pid);
if (r < 0)
goto fail;
swap_set_state(s, SWAP_ACTIVATING);
return;
fail:
log_unit_warning_errno(UNIT(s), r, "Failed to run 'swapon' task: %m");
swap_enter_dead_or_active(s, SWAP_FAILURE_RESOURCES);
}
static void swap_enter_deactivating(Swap *s) {
int r;
assert(s);
s->control_command_id = SWAP_EXEC_DEACTIVATE;
s->control_command = s->exec_command + SWAP_EXEC_DEACTIVATE;
r = exec_command_set(s->control_command,
"/sbin/swapoff",
s->what,
NULL);
if (r < 0)
goto fail;
swap_unwatch_control_pid(s);
r = swap_spawn(s, s->control_command, &s->control_pid);
if (r < 0)
goto fail;
swap_set_state(s, SWAP_DEACTIVATING);
return;
fail:
log_unit_warning_errno(UNIT(s), r, "Failed to run 'swapoff' task: %m");
swap_enter_dead_or_active(s, SWAP_FAILURE_RESOURCES);
}
static int swap_start(Unit *u) {
Swap *s = SWAP(u), *other;
int r;
assert(s);
/* We cannot fulfill this request right now, try again later please! */
if (IN_SET(s->state,
SWAP_DEACTIVATING,
SWAP_DEACTIVATING_SIGTERM,
SWAP_DEACTIVATING_SIGKILL))
return -EAGAIN;
/* Already on it! */
if (s->state == SWAP_ACTIVATING)
return 0;
assert(IN_SET(s->state, SWAP_DEAD, SWAP_FAILED));
if (detect_container() > 0)
return -EPERM;
/* If there's a job for another swap unit for the same node
* running, then let's not dispatch this one for now, and wait
* until that other job has finished. */
LIST_FOREACH_OTHERS(same_devnode, other, s)
if (UNIT(other)->job && UNIT(other)->job->state == JOB_RUNNING)
return -EAGAIN;
r = unit_start_limit_test(u);
if (r < 0) {
swap_enter_dead(s, SWAP_FAILURE_START_LIMIT_HIT);
return r;
}
r = unit_acquire_invocation_id(u);
if (r < 0)
return r;
s->result = SWAP_SUCCESS;
u->reset_accounting = true;
swap_enter_activating(s);
return 1;
}
static int swap_stop(Unit *u) {
Swap *s = SWAP(u);
assert(s);
switch (s->state) {
case SWAP_DEACTIVATING:
case SWAP_DEACTIVATING_SIGTERM:
case SWAP_DEACTIVATING_SIGKILL:
/* Already on it */
return 0;
case SWAP_ACTIVATING:
case SWAP_ACTIVATING_DONE:
/* There's a control process pending, directly enter kill mode */
swap_enter_signal(s, SWAP_DEACTIVATING_SIGTERM, SWAP_SUCCESS);
return 0;
case SWAP_ACTIVE:
if (detect_container() > 0)
return -EPERM;
swap_enter_deactivating(s);
return 1;
default:
assert_not_reached("Unexpected state.");
}
}
static int swap_serialize(Unit *u, FILE *f, FDSet *fds) {
Swap *s = SWAP(u);
assert(s);
assert(f);
assert(fds);
unit_serialize_item(u, f, "state", swap_state_to_string(s->state));
unit_serialize_item(u, f, "result", swap_result_to_string(s->result));
if (s->control_pid > 0)
unit_serialize_item_format(u, f, "control-pid", PID_FMT, s->control_pid);
if (s->control_command_id >= 0)
unit_serialize_item(u, f, "control-command", swap_exec_command_to_string(s->control_command_id));
return 0;
}
static int swap_deserialize_item(Unit *u, const char *key, const char *value, FDSet *fds) {
Swap *s = SWAP(u);
assert(s);
assert(fds);
if (streq(key, "state")) {
SwapState state;
state = swap_state_from_string(value);
if (state < 0)
log_unit_debug(u, "Failed to parse state value: %s", value);
else
s->deserialized_state = state;
} else if (streq(key, "result")) {
SwapResult f;
f = swap_result_from_string(value);
if (f < 0)
log_unit_debug(u, "Failed to parse result value: %s", value);
else if (f != SWAP_SUCCESS)
s->result = f;
} else if (streq(key, "control-pid")) {
pid_t pid;
if (parse_pid(value, &pid) < 0)
log_unit_debug(u, "Failed to parse control-pid value: %s", value);
else
s->control_pid = pid;
} else if (streq(key, "control-command")) {
SwapExecCommand id;
id = swap_exec_command_from_string(value);
if (id < 0)
log_unit_debug(u, "Failed to parse exec-command value: %s", value);
else {
s->control_command_id = id;
s->control_command = s->exec_command + id;
}
} else
log_unit_debug(u, "Unknown serialization key: %s", key);
return 0;
}
_pure_ static UnitActiveState swap_active_state(Unit *u) {
assert(u);
return state_translation_table[SWAP(u)->state];
}
_pure_ static const char *swap_sub_state_to_string(Unit *u) {
assert(u);
return swap_state_to_string(SWAP(u)->state);
}
_pure_ static bool swap_check_gc(Unit *u) {
Swap *s = SWAP(u);
assert(s);
return s->from_proc_swaps;
}
static void swap_sigchld_event(Unit *u, pid_t pid, int code, int status) {
Swap *s = SWAP(u);
SwapResult f;
assert(s);
assert(pid >= 0);
if (pid != s->control_pid)
return;
s->control_pid = 0;
if (is_clean_exit(code, status, EXIT_CLEAN_COMMAND, NULL))
f = SWAP_SUCCESS;
else if (code == CLD_EXITED)
f = SWAP_FAILURE_EXIT_CODE;
else if (code == CLD_KILLED)
f = SWAP_FAILURE_SIGNAL;
else if (code == CLD_DUMPED)
f = SWAP_FAILURE_CORE_DUMP;
else
assert_not_reached("Unknown code");
if (s->result == SWAP_SUCCESS)
s->result = f;
if (s->control_command) {
exec_status_exit(&s->control_command->exec_status, &s->exec_context, pid, code, status);
s->control_command = NULL;
s->control_command_id = _SWAP_EXEC_COMMAND_INVALID;
}
log_unit_full(u, f == SWAP_SUCCESS ? LOG_DEBUG : LOG_NOTICE, 0,
"Swap process exited, code=%s status=%i", sigchld_code_to_string(code), status);
switch (s->state) {
case SWAP_ACTIVATING:
case SWAP_ACTIVATING_DONE:
if (f == SWAP_SUCCESS || s->from_proc_swaps)
swap_enter_active(s, f);
else
swap_enter_dead(s, f);
break;
case SWAP_DEACTIVATING:
case SWAP_DEACTIVATING_SIGKILL:
case SWAP_DEACTIVATING_SIGTERM:
swap_enter_dead_or_active(s, f);
break;
default:
assert_not_reached("Uh, control process died at wrong time.");
}
/* Notify clients about changed exit status */
unit_add_to_dbus_queue(u);
}
static int swap_dispatch_timer(sd_event_source *source, usec_t usec, void *userdata) {
Swap *s = SWAP(userdata);
assert(s);
assert(s->timer_event_source == source);
switch (s->state) {
case SWAP_ACTIVATING:
case SWAP_ACTIVATING_DONE:
log_unit_warning(UNIT(s), "Activation timed out. Stopping.");
swap_enter_signal(s, SWAP_DEACTIVATING_SIGTERM, SWAP_FAILURE_TIMEOUT);
break;
case SWAP_DEACTIVATING:
log_unit_warning(UNIT(s), "Deactivation timed out. Stopping.");
swap_enter_signal(s, SWAP_DEACTIVATING_SIGTERM, SWAP_FAILURE_TIMEOUT);
break;
case SWAP_DEACTIVATING_SIGTERM:
if (s->kill_context.send_sigkill) {
log_unit_warning(UNIT(s), "Swap process timed out. Killing.");
swap_enter_signal(s, SWAP_DEACTIVATING_SIGKILL, SWAP_FAILURE_TIMEOUT);
} else {
log_unit_warning(UNIT(s), "Swap process timed out. Skipping SIGKILL. Ignoring.");
swap_enter_dead_or_active(s, SWAP_FAILURE_TIMEOUT);
}
break;
case SWAP_DEACTIVATING_SIGKILL:
log_unit_warning(UNIT(s), "Swap process still around after SIGKILL. Ignoring.");
swap_enter_dead_or_active(s, SWAP_FAILURE_TIMEOUT);
break;
default:
assert_not_reached("Timeout at wrong time.");
}
return 0;
}
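/* swap_load_proc_swaps() below parses entries of the following shape (illustrative
 * values), skipping the header line:
 *   Filename     Type       Size     Used  Priority
 *   /dev/sda2    partition  8388604  0     -2
 * Only the device/file name and the priority are kept. */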
static int swap_load_proc_swaps(Manager *m, bool set_flags) {
unsigned i;
int r = 0;
assert(m);
rewind(m->proc_swaps);
(void) fscanf(m->proc_swaps, "%*s %*s %*s %*s %*s\n");
for (i = 1;; i++) {
_cleanup_free_ char *dev = NULL, *d = NULL;
int prio = 0, k;
k = fscanf(m->proc_swaps,
"%ms " /* device/file */
"%*s " /* type of swap */
"%*s " /* swap size */
"%*s " /* used */
"%i\n", /* priority */
&dev, &prio);
if (k != 2) {
if (k == EOF)
break;
log_warning("Failed to parse /proc/swaps:%u.", i);
continue;
}
if (cunescape(dev, UNESCAPE_RELAX, &d) < 0)
return log_oom();
device_found_node(m, d, true, DEVICE_FOUND_SWAP, set_flags);
k = swap_process_new(m, d, prio, set_flags);
if (k < 0)
r = k;
}
return r;
}
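/* The kernel raises EPOLLPRI on /proc/swaps whenever its contents change (the event
 * source is registered in swap_enumerate()), so this handler re-reads the file and
 * reconciles swap unit state with what is currently listed there. */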
static int swap_dispatch_io(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
Manager *m = userdata;
Unit *u;
int r;
assert(m);
assert(revents & EPOLLPRI);
r = swap_load_proc_swaps(m, true);
if (r < 0) {
log_error_errno(r, "Failed to reread /proc/swaps: %m");
/* Reset flags, just in case, for late calls */
LIST_FOREACH(units_by_type, u, m->units_by_type[UNIT_SWAP]) {
Swap *swap = SWAP(u);
swap->is_active = swap->just_activated = false;
}
return 0;
}
manager_dispatch_load_queue(m);
LIST_FOREACH(units_by_type, u, m->units_by_type[UNIT_SWAP]) {
Swap *swap = SWAP(u);
if (!swap->is_active) {
/* This has just been deactivated */
swap_unset_proc_swaps(swap);
switch (swap->state) {
case SWAP_ACTIVE:
swap_enter_dead(swap, SWAP_SUCCESS);
break;
default:
/* Fire again */
swap_set_state(swap, swap->state);
break;
}
if (swap->what)
device_found_node(m, swap->what, false, DEVICE_FOUND_SWAP, true);
} else if (swap->just_activated) {
/* New swap entry */
switch (swap->state) {
case SWAP_DEAD:
case SWAP_FAILED:
(void) unit_acquire_invocation_id(UNIT(swap));
swap_enter_active(swap, SWAP_SUCCESS);
break;
case SWAP_ACTIVATING:
swap_set_state(swap, SWAP_ACTIVATING_DONE);
break;
default:
/* Nothing really changed, but let's
* issue a notification call
* nonetheless, in case somebody is
* waiting for this. */
swap_set_state(swap, swap->state);
break;
}
}
/* Reset the flags for later calls */
swap->is_active = swap->just_activated = false;
}
return 1;
}
static Unit *swap_following(Unit *u) {
Swap *s = SWAP(u);
Swap *other, *first = NULL;
assert(s);
/* If the user configured the swap through /etc/fstab or
* a device unit, follow that. */
if (s->from_fragment)
return NULL;
LIST_FOREACH_OTHERS(same_devnode, other, s)
if (other->from_fragment)
return UNIT(other);
/* Otherwise, make everybody follow the unit that's named after
* the swap device in the kernel */
if (streq_ptr(s->what, s->devnode))
return NULL;
LIST_FOREACH_AFTER(same_devnode, other, s)
if (streq_ptr(other->what, other->devnode))
return UNIT(other);
LIST_FOREACH_BEFORE(same_devnode, other, s) {
if (streq_ptr(other->what, other->devnode))
return UNIT(other);
first = other;
}
/* Fall back to the first on the list */
return UNIT(first);
}
static int swap_following_set(Unit *u, Set **_set) {
Swap *s = SWAP(u), *other;
Set *set;
int r;
assert(s);
assert(_set);
if (LIST_JUST_US(same_devnode, s)) {
*_set = NULL;
return 0;
}
set = set_new(NULL);
if (!set)
return -ENOMEM;
LIST_FOREACH_OTHERS(same_devnode, other, s) {
r = set_put(set, other);
if (r < 0)
goto fail;
}
*_set = set;
return 1;
fail:
set_free(set);
return r;
}
static void swap_shutdown(Manager *m) {
assert(m);
m->swap_event_source = sd_event_source_unref(m->swap_event_source);
m->proc_swaps = safe_fclose(m->proc_swaps);
m->swaps_by_devnode = hashmap_free(m->swaps_by_devnode);
}
static void swap_enumerate(Manager *m) {
int r;
assert(m);
if (!m->proc_swaps) {
m->proc_swaps = fopen("/proc/swaps", "re");
if (!m->proc_swaps) {
if (errno == ENOENT)
log_debug("Not swap enabled, skipping enumeration");
else
log_error_errno(errno, "Failed to open /proc/swaps: %m");
return;
}
r = sd_event_add_io(m->event, &m->swap_event_source, fileno(m->proc_swaps), EPOLLPRI, swap_dispatch_io, m);
if (r < 0) {
log_error_errno(r, "Failed to watch /proc/swaps: %m");
goto fail;
}
/* Dispatch this before we dispatch SIGCHLD, so that
* we always get the events from /proc/swaps before
* the SIGCHLD of /sbin/swapon. */
r = sd_event_source_set_priority(m->swap_event_source, SD_EVENT_PRIORITY_NORMAL-10);
if (r < 0) {
log_error_errno(r, "Failed to change /proc/swaps priority: %m");
goto fail;
}
(void) sd_event_source_set_description(m->swap_event_source, "swap-proc");
}
r = swap_load_proc_swaps(m, false);
if (r < 0)
goto fail;
return;
fail:
swap_shutdown(m);
}
int swap_process_device_new(Manager *m, struct udev_device *dev) {
struct udev_list_entry *item = NULL, *first = NULL;
_cleanup_free_ char *e = NULL;
const char *dn;
Unit *u;
int r = 0;
assert(m);
assert(dev);
dn = udev_device_get_devnode(dev);
if (!dn)
return 0;
r = unit_name_from_path(dn, ".swap", &e);
if (r < 0)
return r;
u = manager_get_unit(m, e);
if (u)
r = swap_set_devnode(SWAP(u), dn);
first = udev_device_get_devlinks_list_entry(dev);
udev_list_entry_foreach(item, first) {
_cleanup_free_ char *n = NULL;
int q;
q = unit_name_from_path(udev_list_entry_get_name(item), ".swap", &n);
if (q < 0)
return q;
u = manager_get_unit(m, n);
if (u) {
q = swap_set_devnode(SWAP(u), dn);
if (q < 0)
r = q;
}
}
return r;
}
int swap_process_device_remove(Manager *m, struct udev_device *dev) {
const char *dn;
int r = 0;
Swap *s;
dn = udev_device_get_devnode(dev);
if (!dn)
return 0;
while ((s = hashmap_get(m->swaps_by_devnode, dn))) {
int q;
q = swap_set_devnode(s, NULL);
if (q < 0)
r = q;
}
return r;
}
static void swap_reset_failed(Unit *u) {
Swap *s = SWAP(u);
assert(s);
if (s->state == SWAP_FAILED)
swap_set_state(s, SWAP_DEAD);
s->result = SWAP_SUCCESS;
}
static int swap_kill(Unit *u, KillWho who, int signo, sd_bus_error *error) {
return unit_kill_common(u, who, signo, -1, SWAP(u)->control_pid, error);
}
static int swap_get_timeout(Unit *u, usec_t *timeout) {
Swap *s = SWAP(u);
usec_t t;
int r;
if (!s->timer_event_source)
return 0;
r = sd_event_source_get_time(s->timer_event_source, &t);
if (r < 0)
return r;
if (t == USEC_INFINITY)
return 0;
*timeout = t;
return 1;
}
static bool swap_supported(void) {
static int supported = -1;
/* If swap support is not available in the kernel, or we are
* running in a container, we don't support swap units, and any
* attempt to start one should fail immediately. */
if (supported < 0)
supported =
access("/proc/swaps", F_OK) >= 0 &&
detect_container() <= 0;
return supported;
}
static int swap_control_pid(Unit *u) {
Swap *s = SWAP(u);
assert(s);
return s->control_pid;
}
static const char* const swap_exec_command_table[_SWAP_EXEC_COMMAND_MAX] = {
[SWAP_EXEC_ACTIVATE] = "ExecActivate",
[SWAP_EXEC_DEACTIVATE] = "ExecDeactivate",
};
DEFINE_STRING_TABLE_LOOKUP(swap_exec_command, SwapExecCommand);
static const char* const swap_result_table[_SWAP_RESULT_MAX] = {
[SWAP_SUCCESS] = "success",
[SWAP_FAILURE_RESOURCES] = "resources",
[SWAP_FAILURE_TIMEOUT] = "timeout",
[SWAP_FAILURE_EXIT_CODE] = "exit-code",
[SWAP_FAILURE_SIGNAL] = "signal",
[SWAP_FAILURE_CORE_DUMP] = "core-dump",
[SWAP_FAILURE_START_LIMIT_HIT] = "start-limit-hit",
};
DEFINE_STRING_TABLE_LOOKUP(swap_result, SwapResult);
const UnitVTable swap_vtable = {
.object_size = sizeof(Swap),
.exec_context_offset = offsetof(Swap, exec_context),
.cgroup_context_offset = offsetof(Swap, cgroup_context),
.kill_context_offset = offsetof(Swap, kill_context),
.exec_runtime_offset = offsetof(Swap, exec_runtime),
.dynamic_creds_offset = offsetof(Swap, dynamic_creds),
.sections =
"Unit\0"
"Swap\0"
"Install\0",
.private_section = "Swap",
.init = swap_init,
.load = swap_load,
.done = swap_done,
.coldplug = swap_coldplug,
.dump = swap_dump,
.start = swap_start,
.stop = swap_stop,
.kill = swap_kill,
.get_timeout = swap_get_timeout,
.serialize = swap_serialize,
.deserialize_item = swap_deserialize_item,
.active_state = swap_active_state,
.sub_state_to_string = swap_sub_state_to_string,
.check_gc = swap_check_gc,
.sigchld_event = swap_sigchld_event,
.reset_failed = swap_reset_failed,
.control_pid = swap_control_pid,
.bus_vtable = bus_swap_vtable,
.bus_set_property = bus_swap_set_property,
.bus_commit_properties = bus_swap_commit_properties,
.following = swap_following,
.following_set = swap_following_set,
.enumerate = swap_enumerate,
.shutdown = swap_shutdown,
.supported = swap_supported,
.status_message_formats = {
.starting_stopping = {
[0] = "Activating swap %s...",
[1] = "Deactivating swap %s...",
},
.finished_start_job = {
[JOB_DONE] = "Activated swap %s.",
[JOB_FAILED] = "Failed to activate swap %s.",
[JOB_TIMEOUT] = "Timed out activating swap %s.",
},
.finished_stop_job = {
[JOB_DONE] = "Deactivated swap %s.",
[JOB_FAILED] = "Failed deactivating swap %s.",
[JOB_TIMEOUT] = "Timed out deactivating swap %s.",
},
},
};
| neheb/systemd | src/core/swap.c | C | gpl-2.0 | 46,326 |
/*
* File: prg.c
* Implements: parsing of a javascript program aggregate
*
* Copyright: Jens Låås, 2015
* Copyright license: According to GPL, see file COPYING in this directory.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include "duktape.h"
#include "prg.h"
static struct prg *prg_call;
static struct native_mod *prg_native_modules;
int prg_register(struct prg *prg, struct native_mod *native_modules)
{
prg_call = prg;
prg_native_modules = native_modules;
return 0;
}
struct mod *prg_storage_byname(const char *name)
{
struct mod *mod;
for(mod = prg_call->modules; mod; mod=mod->next) {
if(!strcmp(mod->fullname, name))
return mod;
}
return (void*)0;
}
struct mod *prg_storage_byid(int id)
{
struct mod *mod;
for(mod = prg_call->modules; mod; mod=mod->next) {
if(mod->id == id)
return mod;
}
return (void*)0;
}
static struct mod *mod_new()
{
struct mod *m;
m = malloc(sizeof(struct mod));
if(m) memset(m, 0, sizeof(struct mod));
return m;
}
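/* Compile the mmapped source (pushed by the caller), run it with the global object
 * as 'this', and then, if the script defined a global main(), call it with the
 * remaining command line arguments and use its return value as the exit status. */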
int prg_wrapped_compile_execute(duk_context *ctx) {
int ret;
struct prg *prg = prg_call;
duk_compile(ctx, 0);
close(prg->fd);
munmap(prg->buf, prg->size);
duk_push_global_object(ctx); /* 'this' binding */
duk_call_method(ctx, 0);
prg->status = duk_to_int(ctx, -1);
duk_pop(ctx); // pop return value
// Check if global property 'main' exists
duk_push_global_object(ctx);
ret = duk_get_prop_string(ctx, -1, "main");
duk_remove(ctx, -2); // remove global object
// If main exists we call it
if(ret && duk_get_type(ctx, -1) != DUK_TYPE_UNDEFINED) {
int i;
duk_push_global_object(ctx); /* 'this' binding */
for(i=1;i<prg->argc;i++) {
duk_push_string(ctx, prg->argv[i]);
}
duk_call_method(ctx, prg->argc-1);
prg->status = duk_to_int(ctx, -1);
}
duk_pop(ctx);
return 0; // no values returned (0)
}
static int x(duk_context *ctx)
{
duk_push_string(ctx, "hello from x");
return 1;
}
static const duk_function_list_entry x_funcs[] = {
{ "x", x, 0 /* fd */ },
{ NULL, NULL, 0 }
};
/* Duktape.modSearch = function (id, require, exports, module) */
static int modSearch(duk_context *ctx)
{
struct mod *mod;
struct prg *prg;
int i;
prg = prg_call;
const char *id = duk_to_string(ctx, 0);
/*
* To support the native module case, the module search function can also return undefined
* (or any non-string value), in which case Duktape will assume that the module was found
* but has no Ecmascript source to execute. Symbols written to exports in the module search
* function are the only symbols provided by the module.
*/
for(i=0;;i++) {
if(!prg_native_modules[i].name) break;
if(!strcmp(id, prg_native_modules[i].name)) {
prg_native_modules[i].fn(ctx, 2, prg);
duk_push_undefined(ctx);
return 1;
}
}
if(!strcmp(id, "x")) {
duk_put_function_list(ctx, 2, x_funcs);
duk_push_undefined(ctx);
return 1;
}
/*
* If a module is found, the module search function can return a string providing the source
* code for the module. Duktape will then take care of compiling and executing the module code
* so that module symbols get registered into the exports object.
*/
for(mod=prg->modules;mod;mod=mod->next) {
if(!strcmp(mod->name, id)) {
duk_push_lstring(ctx, mod->buf, mod->size);
return 1;
}
}
duk_error(ctx, DUK_ERR_ERROR, "failed to find module '%s'", id);
return DUK_RET_ERROR;
}
int prg_push_modsearch(duk_context *ctx)
{
duk_get_prop_string(ctx, -1, "Duktape");
duk_push_c_function(ctx, modSearch, 4);
duk_put_prop_string(ctx, -2, "modSearch");
duk_pop(ctx); // pop Duktape
return 0;
}
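/* Sketch of the aggregate layout prg_parse_appfile() expects, inferred from the
 * parser below (sizes and names are illustrative):
 *   #!<interpreter>           optional shebang line, skipped
 *   123 lib/util.js           one "size name" line per embedded module
 *   456 main.js
 *   579 total                 a line named "total" ends the listing
 *   <module data>             module contents sit at the end of the file in listing
 *                             order; offsets are computed backwards from the end
 * If no listing is found, everything after the shebang is treated as "main". */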
int prg_parse_appfile(struct prg *prg)
{
char *p, *endp, *start, *m, *mainstart;
off_t offset, pa_offset;
struct mod *mod;
int id = 1000;
// read file prg->name
prg->fd = open(prg->name, O_RDONLY);
if(prg->fd == -1) {
exit(1);
}
prg->size = lseek(prg->fd, 0, SEEK_END);
prg->buf = mmap((void*)0, prg->size, PROT_READ, MAP_PRIVATE, prg->fd, 0);
/* parse file header
*/
p = prg->buf;
endp = prg->buf + prg->size;
if(*p == '#') {
while((p < endp) && (*p != '\n')) p++;
if(p >= endp) {
fprintf(stderr, "EOF\n");
exit(1);
}
p++;
}
mainstart = p;
mod = mod_new();
mod->id = id++;
for(start=p;p < endp;p++) {
if(*p == '\n') {
/* is this a module specification? */
for(m = start; *m == ' '; m++);
if((*m >= '0') && (*m <= '9')) {
mod->size = strtoul(m, &m, 10);
if(!m) break;
if(*m != ' ') break;
m++;
mod->name = strndup(m, p-m);
mod->fullname = strdup(mod->name);
if(!strcmp(mod->name, "total"))
break;
mod->next = prg->modules;
prg->modules = mod;
mod = mod_new();
mod->id = id++;
} else
break;
start = p+1;
}
}
offset = prg->size;
for(mod = prg->modules; mod; mod=mod->next) {
offset -= mod->size;
pa_offset = offset & ~(sysconf(_SC_PAGE_SIZE) - 1);
mod->buf = mmap((void*)0, mod->size + offset - pa_offset,
PROT_READ, MAP_PRIVATE, prg->fd, pa_offset);
if(mod->buf == MAP_FAILED) {
fprintf(stderr, "mmap failed\n");
exit(1);
}
mod->buf += (offset - pa_offset);
}
for(mod = prg->modules; mod; mod=mod->next) {
char *p;
if((p=strrchr(mod->name, '.'))) {
if(!strcmp(p, ".js"))
*p=0;
}
}
for(mod = prg->modules; mod; mod=mod->next) {
if(!strcmp(mod->name, "main")) {
prg->main = mod;
} else {
char *p;
if((p=strrchr(mod->name, '/'))) {
if(!strcmp(p+1, "main")) {
prg->main = mod;
}
}
}
}
if(!prg->modules) {
prg->main = mod_new();
prg->main->id = id++;
prg->main->buf = mainstart;
prg->main->size = prg->size - (mainstart - prg->buf);
prg->main->name = "main";
prg->main->fullname = "main";
}
if(!prg->main) {
fprintf(stderr, "no main module\n");
exit(1);
}
return 0;
}
static int prg1_storage(duk_context *ctx)
{
const char *name = duk_to_string(ctx, 0);
struct mod *mod;
mod = prg_storage_byname(name);
if(mod) {
duk_push_object(ctx);
duk_push_int(ctx, mod->id);
duk_put_prop_string(ctx, -2, "id");
duk_push_int(ctx, mod->size);
duk_put_prop_string(ctx, -2, "size");
duk_push_string(ctx, mod->name);
duk_put_prop_string(ctx, -2, "name");
duk_push_string(ctx, mod->fullname);
duk_put_prop_string(ctx, -2, "fullname");
} else {
duk_push_undefined(ctx);
}
return 1;
}
static int prg1_storage_write(duk_context *ctx)
{
struct mod *mod;
int rc = -1;
int id, fd;
size_t offset, len;
fd = duk_to_int(ctx, 0);
id = duk_to_int(ctx, 1);
offset = duk_to_int(ctx, 2);
len = duk_to_int(ctx, 3);
mod = prg_storage_byid(id);
if(mod) {
rc = write(fd, mod->buf + offset, len);
}
duk_push_int(ctx, rc);
return 1;
}
static int prg1_storage_buffer(duk_context *ctx)
{
struct mod *mod;
int id;
size_t offset, len;
char *buf;
id = duk_to_int(ctx, 0);
offset = duk_to_int(ctx, 1);
len = duk_to_int(ctx, 2);
mod = prg_storage_byid(id);
if(mod) {
buf = duk_push_fixed_buffer(ctx, len);
memcpy(buf, mod->buf + offset, len);
} else {
duk_push_undefined(ctx);
}
return 1;
}
static const duk_function_list_entry prg1_funcs[] = {
{ "storage", prg1_storage, 1 /* name */ },
{ "storage_write", prg1_storage_write, 4 /* fd, id, offset, len */ },
{ "storage_buffer", prg1_storage_buffer, 4 /* id, offset, len */ },
{ NULL, NULL, 0 }
};
static const duk_number_list_entry prg1_consts[] = {
{ NULL, 0.0 }
};
int prg1_load(duk_context *ctx, int n, struct prg *prg)
{
duk_put_function_list(ctx, n, prg1_funcs);
duk_put_number_list(ctx, n, prg1_consts);
return 0;
}
| jelaas/sys | prg.c | C | gpl-2.0 | 7,575 |
/*
* Intel XScale PXA Programmable Interrupt Controller.
*
* Copyright (c) 2006 Openedhand Ltd.
* Copyright (c) 2006 Thorsten Zitterell
* Written by Andrzej Zaborowski <balrog@zabor.org>
*
* This code is licenced under the GPL.
*/
#include "hw.h"
#include "pxa.h"
#define ICIP 0x00 /* Interrupt Controller IRQ Pending register */
#define ICMR 0x04 /* Interrupt Controller Mask register */
#define ICLR 0x08 /* Interrupt Controller Level register */
#define ICFP 0x0c /* Interrupt Controller FIQ Pending register */
#define ICPR 0x10 /* Interrupt Controller Pending register */
#define ICCR 0x14 /* Interrupt Controller Control register */
#define ICHP 0x18 /* Interrupt Controller Highest Priority register */
#define IPR0 0x1c /* Interrupt Controller Priority register 0 */
#define IPR31 0x98 /* Interrupt Controller Priority register 31 */
#define ICIP2 0x9c /* Interrupt Controller IRQ Pending register 2 */
#define ICMR2 0xa0 /* Interrupt Controller Mask register 2 */
#define ICLR2 0xa4 /* Interrupt Controller Level register 2 */
#define ICFP2 0xa8 /* Interrupt Controller FIQ Pending register 2 */
#define ICPR2 0xac /* Interrupt Controller Pending register 2 */
#define IPR32 0xb0 /* Interrupt Controller Priority register 32 */
#define IPR39 0xcc /* Interrupt Controller Priority register 39 */
#define PXA2XX_PIC_SRCS 40
typedef struct {
CPUState *cpu_env;
uint32_t int_enabled[2];
uint32_t int_pending[2];
uint32_t is_fiq[2];
uint32_t int_idle;
uint32_t priority[PXA2XX_PIC_SRCS];
} PXA2xxPICState;
static void pxa2xx_pic_update(void *opaque)
{
uint32_t mask[2];
PXA2xxPICState *s = (PXA2xxPICState *) opaque;
if (s->cpu_env->halted) {
mask[0] = s->int_pending[0] & (s->int_enabled[0] | s->int_idle);
mask[1] = s->int_pending[1] & (s->int_enabled[1] | s->int_idle);
if (mask[0] || mask[1])
cpu_interrupt(s->cpu_env, CPU_INTERRUPT_EXITTB);
}
mask[0] = s->int_pending[0] & s->int_enabled[0];
mask[1] = s->int_pending[1] & s->int_enabled[1];
if ((mask[0] & s->is_fiq[0]) || (mask[1] & s->is_fiq[1]))
cpu_interrupt(s->cpu_env, CPU_INTERRUPT_FIQ);
else
cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_FIQ);
if ((mask[0] & ~s->is_fiq[0]) || (mask[1] & ~s->is_fiq[1]))
cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
else
cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
}
/* Note: Here "level" means the state of the signal on a pin, not
* the IRQ/FIQ distinction as in the PXA Developer Manual. */
static void pxa2xx_pic_set_irq(void *opaque, int irq, int level)
{
PXA2xxPICState *s = (PXA2xxPICState *) opaque;
int int_set = (irq >= 32);
irq &= 31;
if (level)
s->int_pending[int_set] |= 1 << irq;
else
s->int_pending[int_set] &= ~(1 << irq);
pxa2xx_pic_update(opaque);
}
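/* ICHP encoding used below: bits [5:0] hold the id of the highest-priority pending
 * FIQ (valid when bit 15 is set) and bits [21:16] the id of the highest-priority
 * pending IRQ (valid when bit 31 is set); 0x003f003f therefore means that both
 * ids are invalid. */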
static inline uint32_t pxa2xx_pic_highest(PXA2xxPICState *s) {
int i, int_set, irq;
uint32_t bit, mask[2];
uint32_t ichp = 0x003f003f; /* Both IDs invalid */
mask[0] = s->int_pending[0] & s->int_enabled[0];
mask[1] = s->int_pending[1] & s->int_enabled[1];
for (i = PXA2XX_PIC_SRCS - 1; i >= 0; i --) {
irq = s->priority[i] & 0x3f;
if ((s->priority[i] & (1 << 31)) && irq < PXA2XX_PIC_SRCS) {
/* Source peripheral ID is valid. */
bit = 1 << (irq & 31);
int_set = (irq >= 32);
if (mask[int_set] & bit & s->is_fiq[int_set]) {
/* FIQ asserted */
ichp &= 0xffff0000;
ichp |= (1 << 15) | irq;
}
if (mask[int_set] & bit & ~s->is_fiq[int_set]) {
/* IRQ asserted */
ichp &= 0x0000ffff;
ichp |= (1 << 31) | (irq << 16);
}
}
}
return ichp;
}
static uint32_t pxa2xx_pic_mem_read(void *opaque, target_phys_addr_t offset)
{
PXA2xxPICState *s = (PXA2xxPICState *) opaque;
switch (offset) {
case ICIP: /* IRQ Pending register */
return s->int_pending[0] & ~s->is_fiq[0] & s->int_enabled[0];
case ICIP2: /* IRQ Pending register 2 */
return s->int_pending[1] & ~s->is_fiq[1] & s->int_enabled[1];
case ICMR: /* Mask register */
return s->int_enabled[0];
case ICMR2: /* Mask register 2 */
return s->int_enabled[1];
case ICLR: /* Level register */
return s->is_fiq[0];
case ICLR2: /* Level register 2 */
return s->is_fiq[1];
case ICCR: /* Idle mask */
return (s->int_idle == 0);
case ICFP: /* FIQ Pending register */
return s->int_pending[0] & s->is_fiq[0] & s->int_enabled[0];
case ICFP2: /* FIQ Pending register 2 */
return s->int_pending[1] & s->is_fiq[1] & s->int_enabled[1];
case ICPR: /* Pending register */
return s->int_pending[0];
case ICPR2: /* Pending register 2 */
return s->int_pending[1];
case IPR0 ... IPR31:
return s->priority[0 + ((offset - IPR0 ) >> 2)];
case IPR32 ... IPR39:
return s->priority[32 + ((offset - IPR32) >> 2)];
case ICHP: /* Highest Priority register */
return pxa2xx_pic_highest(s);
default:
printf("%s: Bad register offset " REG_FMT "\n", __FUNCTION__, offset);
return 0;
}
}
static void pxa2xx_pic_mem_write(void *opaque, target_phys_addr_t offset,
uint32_t value)
{
PXA2xxPICState *s = (PXA2xxPICState *) opaque;
switch (offset) {
case ICMR: /* Mask register */
s->int_enabled[0] = value;
break;
case ICMR2: /* Mask register 2 */
s->int_enabled[1] = value;
break;
case ICLR: /* Level register */
s->is_fiq[0] = value;
break;
case ICLR2: /* Level register 2 */
s->is_fiq[1] = value;
break;
case ICCR: /* Idle mask */
s->int_idle = (value & 1) ? 0 : ~0;
break;
case IPR0 ... IPR31:
s->priority[0 + ((offset - IPR0 ) >> 2)] = value & 0x8000003f;
break;
case IPR32 ... IPR39:
s->priority[32 + ((offset - IPR32) >> 2)] = value & 0x8000003f;
break;
default:
printf("%s: Bad register offset " REG_FMT "\n", __FUNCTION__, offset);
return;
}
pxa2xx_pic_update(opaque);
}
/* Interrupt Controller Coprocessor Space Register Mapping */
static const int pxa2xx_cp_reg_map[0x10] = {
[0x0 ... 0xf] = -1,
[0x0] = ICIP,
[0x1] = ICMR,
[0x2] = ICLR,
[0x3] = ICFP,
[0x4] = ICPR,
[0x5] = ICHP,
[0x6] = ICIP2,
[0x7] = ICMR2,
[0x8] = ICLR2,
[0x9] = ICFP2,
[0xa] = ICPR2,
};
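/* Coprocessor 6 register numbers are translated through the table above into the
 * memory-mapped register offsets; entries left at -1 are unmapped and rejected by
 * the cp read/write handlers registered via cpu_arm_set_cp_io() in pxa2xx_pic_init(). */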
static uint32_t pxa2xx_pic_cp_read(void *opaque, int op2, int reg, int crm,
void *retaddr)
{
target_phys_addr_t offset;
if (pxa2xx_cp_reg_map[reg] == -1) {
printf("%s: Bad register 0x%x\n", __FUNCTION__, reg);
return 0;
}
offset = pxa2xx_cp_reg_map[reg];
return pxa2xx_pic_mem_read(opaque, offset);
}
static void pxa2xx_pic_cp_write(void *opaque, int op2, int reg, int crm,
uint32_t value, void *retaddr)
{
target_phys_addr_t offset;
if (pxa2xx_cp_reg_map[reg] == -1) {
printf("%s: Bad register 0x%x\n", __FUNCTION__, reg);
return;
}
offset = pxa2xx_cp_reg_map[reg];
pxa2xx_pic_mem_write(opaque, offset, value);
}
static CPUReadMemoryFunc * const pxa2xx_pic_readfn[] = {
pxa2xx_pic_mem_read,
pxa2xx_pic_mem_read,
pxa2xx_pic_mem_read,
};
static CPUWriteMemoryFunc * const pxa2xx_pic_writefn[] = {
pxa2xx_pic_mem_write,
pxa2xx_pic_mem_write,
pxa2xx_pic_mem_write,
};
static void pxa2xx_pic_save(QEMUFile *f, void *opaque)
{
PXA2xxPICState *s = (PXA2xxPICState *) opaque;
int i;
for (i = 0; i < 2; i ++)
qemu_put_be32s(f, &s->int_enabled[i]);
for (i = 0; i < 2; i ++)
qemu_put_be32s(f, &s->int_pending[i]);
for (i = 0; i < 2; i ++)
qemu_put_be32s(f, &s->is_fiq[i]);
qemu_put_be32s(f, &s->int_idle);
for (i = 0; i < PXA2XX_PIC_SRCS; i ++)
qemu_put_be32s(f, &s->priority[i]);
}
static int pxa2xx_pic_load(QEMUFile *f, void *opaque, int version_id)
{
PXA2xxPICState *s = (PXA2xxPICState *) opaque;
int i;
for (i = 0; i < 2; i ++)
qemu_get_be32s(f, &s->int_enabled[i]);
for (i = 0; i < 2; i ++)
qemu_get_be32s(f, &s->int_pending[i]);
for (i = 0; i < 2; i ++)
qemu_get_be32s(f, &s->is_fiq[i]);
qemu_get_be32s(f, &s->int_idle);
for (i = 0; i < PXA2XX_PIC_SRCS; i ++)
qemu_get_be32s(f, &s->priority[i]);
pxa2xx_pic_update(opaque);
return 0;
}
qemu_irq *pxa2xx_pic_init(target_phys_addr_t base, CPUState *env)
{
PXA2xxPICState *s;
int iomemtype;
qemu_irq *qi;
s = (PXA2xxPICState *)
qemu_mallocz(sizeof(PXA2xxPICState));
if (!s)
return NULL;
s->cpu_env = env;
s->int_pending[0] = 0;
s->int_pending[1] = 0;
s->int_enabled[0] = 0;
s->int_enabled[1] = 0;
s->is_fiq[0] = 0;
s->is_fiq[1] = 0;
qi = qemu_allocate_irqs(pxa2xx_pic_set_irq, s, PXA2XX_PIC_SRCS);
/* Enable IC memory-mapped registers access. */
iomemtype = cpu_register_io_memory(pxa2xx_pic_readfn,
pxa2xx_pic_writefn, s, DEVICE_NATIVE_ENDIAN);
cpu_register_physical_memory(base, 0x00100000, iomemtype);
/* Enable IC coprocessor access. */
cpu_arm_set_cp_io(env, 6, pxa2xx_pic_cp_read, pxa2xx_pic_cp_write, s);
register_savevm(NULL, "pxa2xx_pic", 0, 0, pxa2xx_pic_save,
pxa2xx_pic_load, s);
return qi;
}
| XVilka/qemu | hw/pxa2xx_pic.c | C | gpl-2.0 | 9,607 |
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/amigayle.h>
#include <asm/amipcmcia.h>
#include "8390.h"
#define DRV_NAME "apne"
#define NE_BASE (dev->base_addr)
#define NE_CMD 0x00
#define NE_DATAPORT 0x10
#define NE_RESET 0x1f
#define NE_IO_EXTENT 0x20
#define NE_EN0_ISR 0x07
#define NE_EN0_DCFG 0x0e
#define NE_EN0_RSARLO 0x08
#define NE_EN0_RSARHI 0x09
#define NE_EN0_RCNTLO 0x0a
#define NE_EN0_RXCR 0x0c
#define NE_EN0_TXCR 0x0d
#define NE_EN0_RCNTHI 0x0b
#define NE_EN0_IMR 0x0f
#define NE1SM_START_PG 0x20
#define NE1SM_STOP_PG 0x40
#define NESM_START_PG 0x40
#define NESM_STOP_PG 0x80
struct net_device * __init apne_probe(int unit);
static int apne_probe1(struct net_device *dev, int ioaddr);
static void apne_reset_8390(struct net_device *dev);
static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page);
static void apne_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void apne_block_output(struct net_device *dev, const int count,
const unsigned char *buf, const int start_page);
static irqreturn_t apne_interrupt(int irq, void *dev_id);
static int init_pcmcia(void);
#define IOBASE 0x300
static const char version[] =
"apne.c:v1.1 7/10/98 Alain Malek (Alain.Malek@cryogen.ch)\n";
static int apne_owned;
struct net_device * __init apne_probe(int unit)
{
struct net_device *dev;
#ifndef MANUAL_CONFIG
char tuple[8];
#endif
int err;
if (!MACH_IS_AMIGA)
return ERR_PTR(-ENODEV);
if (apne_owned)
return ERR_PTR(-ENODEV);
if ( !(AMIGAHW_PRESENT(PCMCIA)) )
return ERR_PTR(-ENODEV);
printk("Looking for PCMCIA ethernet card : ");
if (!(PCMCIA_INSERTED)) {
printk("NO PCMCIA card inserted\n");
return ERR_PTR(-ENODEV);
}
dev = alloc_ei_netdev();
if (!dev)
return ERR_PTR(-ENOMEM);
if (unit >= 0) {
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
}
pcmcia_disable_irq();
#ifndef MANUAL_CONFIG
if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
(tuple[2] != CISTPL_FUNCID_NETWORK)) {
printk("not an ethernet card\n");
free_netdev(dev);
return ERR_PTR(-ENODEV);
}
#endif
printk("ethernet PCMCIA card inserted\n");
if (!init_pcmcia()) {
free_netdev(dev);
return ERR_PTR(-ENODEV);
}
if (!request_region(IOBASE, 0x20, DRV_NAME)) {
free_netdev(dev);
return ERR_PTR(-EBUSY);
}
err = apne_probe1(dev, IOBASE);
if (err) {
release_region(IOBASE, 0x20);
free_netdev(dev);
return ERR_PTR(err);
}
err = register_netdev(dev);
if (!err)
return dev;
pcmcia_disable_irq();
free_irq(IRQ_AMIGA_PORTS, dev);
pcmcia_reset();
release_region(IOBASE, 0x20);
free_netdev(dev);
return ERR_PTR(err);
}
static int __init apne_probe1(struct net_device *dev, int ioaddr)
{
int i;
unsigned char SA_prom[32];
int wordlength = 2;
const char *name = NULL;
int start_page, stop_page;
#ifndef MANUAL_HWADDR0
int neX000, ctron;
#endif
static unsigned version_printed;
if (ei_debug && version_printed++ == 0)
printk(version);
printk("PCMCIA NE*000 ethercard probe");
{ unsigned long reset_start_time = jiffies;
outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
printk(" not found (no reset ack).\n");
return -ENODEV;
}
outb(0xff, ioaddr + NE_EN0_ISR);
}
#ifndef MANUAL_HWADDR0
{
struct {unsigned long value, offset; } program_seq[] = {
{E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD},
{0x48, NE_EN0_DCFG},
{0x00, NE_EN0_RCNTLO},
{0x00, NE_EN0_RCNTHI},
{0x00, NE_EN0_IMR},
{0xFF, NE_EN0_ISR},
{E8390_RXOFF, NE_EN0_RXCR},
{E8390_TXOFF, NE_EN0_TXCR},
{32, NE_EN0_RCNTLO},
{0x00, NE_EN0_RCNTHI},
{0x00, NE_EN0_RSARLO},
{0x00, NE_EN0_RSARHI},
{E8390_RREAD+E8390_START, NE_CMD},
};
for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
outb(program_seq[i].value, ioaddr + program_seq[i].offset);
}
}
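/* Read the 32-byte station address PROM. On 16-bit (NE2000-style) cards every byte
 * appears twice; if any pair of bytes differs, assume an 8-bit (NE1000-style) card,
 * otherwise keep only one copy of each duplicated byte. */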
for(i = 0; i < 32 ; i+=2) {
SA_prom[i] = inb(ioaddr + NE_DATAPORT);
SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
if (SA_prom[i] != SA_prom[i+1])
wordlength = 1;
}
if (wordlength == 2)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
if (wordlength == 2) {
outb(0x49, ioaddr + NE_EN0_DCFG);
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
} else {
start_page = NE1SM_START_PG;
stop_page = NE1SM_STOP_PG;
}
neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
if (neX000) {
name = (wordlength == 2) ? "NE2000" : "NE1000";
} else if (ctron) {
name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
start_page = 0x01;
stop_page = (wordlength == 2) ? 0x40 : 0x20;
} else {
printk(" not found.\n");
return -ENXIO;
}
#else
wordlength = 2;
outb(0x49, ioaddr + NE_EN0_DCFG);
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
SA_prom[0] = MANUAL_HWADDR0;
SA_prom[1] = MANUAL_HWADDR1;
SA_prom[2] = MANUAL_HWADDR2;
SA_prom[3] = MANUAL_HWADDR3;
SA_prom[4] = MANUAL_HWADDR4;
SA_prom[5] = MANUAL_HWADDR5;
name = "NE2000";
#endif
dev->base_addr = ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
dev->netdev_ops = &ei_netdev_ops;
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
for(i = 0; i < ETHER_ADDR_LEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
printk("%s: %s found.\n", dev->name, name);
ei_status.name = name;
ei_status.tx_start_page = start_page;
ei_status.stop_page = stop_page;
ei_status.word16 = (wordlength == 2);
ei_status.rx_start_page = start_page + TX_PAGES;
ei_status.reset_8390 = &apne_reset_8390;
ei_status.block_input = &apne_block_input;
ei_status.block_output = &apne_block_output;
ei_status.get_8390_hdr = &apne_get_8390_hdr;
NS8390_init(dev, 0);
pcmcia_ack_int(pcmcia_get_intreq());
pcmcia_enable_irq();
apne_owned = 1;
return 0;
}
static void
apne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
init_pcmcia();
if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
ei_status.txing = 0;
ei_status.dmaing = 0;
while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
printk("%s: ne_reset_8390() did not complete.\n", dev->name);
break;
}
outb(ENISR_RESET, NE_BASE + NE_EN0_ISR);
}
static void
apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
int nic_base = dev->base_addr;
int cnt;
char *ptrc;
short *ptrs;
if (ei_status.dmaing) {
printk("%s: DMAing conflict in ne_get_8390_hdr "
"[DMAstat:%d][irqlock:%d][intr:%d].\n",
dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
outb(ENISR_RDC, nic_base + NE_EN0_ISR);
outb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
outb(0, nic_base + NE_EN0_RCNTHI);
outb(0, nic_base + NE_EN0_RSARLO);
outb(ring_page, nic_base + NE_EN0_RSARHI);
outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
if (ei_status.word16) {
ptrs = (short*)hdr;
for(cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++)
*ptrs++ = inw(NE_BASE + NE_DATAPORT);
} else {
ptrc = (char*)hdr;
for(cnt = 0; cnt < sizeof(struct e8390_pkt_hdr); cnt++)
*ptrc++ = inb(NE_BASE + NE_DATAPORT);
}
outb(ENISR_RDC, nic_base + NE_EN0_ISR);
ei_status.dmaing &= ~0x01;
le16_to_cpus(&hdr->count);
}
static void
apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
int nic_base = dev->base_addr;
char *buf = skb->data;
char *ptrc;
short *ptrs;
int cnt;
if (ei_status.dmaing) {
printk("%s: DMAing conflict in ne_block_input "
"[DMAstat:%d][irqlock:%d][intr:%d].\n",
dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
outb(ENISR_RDC, nic_base + NE_EN0_ISR);
outb(count & 0xff, nic_base + NE_EN0_RCNTLO);
outb(count >> 8, nic_base + NE_EN0_RCNTHI);
outb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
outb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
if (ei_status.word16) {
ptrs = (short*)buf;
for (cnt = 0; cnt < (count>>1); cnt++)
*ptrs++ = inw(NE_BASE + NE_DATAPORT);
if (count & 0x01) {
buf[count-1] = inb(NE_BASE + NE_DATAPORT);
}
} else {
ptrc = (char*)buf;
for (cnt = 0; cnt < count; cnt++)
*ptrc++ = inb(NE_BASE + NE_DATAPORT);
}
outb(ENISR_RDC, nic_base + NE_EN0_ISR);
ei_status.dmaing &= ~0x01;
}
static void
apne_block_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page)
{
int nic_base = NE_BASE;
unsigned long dma_start;
char *ptrc;
short *ptrs;
int cnt;
if (ei_status.word16 && (count & 0x01))
count++;
if (ei_status.dmaing) {
printk("%s: DMAing conflict in ne_block_output."
"[DMAstat:%d][irqlock:%d][intr:%d]\n",
dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
return;
}
ei_status.dmaing |= 0x01;
outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
outb(ENISR_RDC, nic_base + NE_EN0_ISR);
outb(count & 0xff, nic_base + NE_EN0_RCNTLO);
outb(count >> 8, nic_base + NE_EN0_RCNTHI);
outb(0x00, nic_base + NE_EN0_RSARLO);
outb(start_page, nic_base + NE_EN0_RSARHI);
outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
if (ei_status.word16) {
ptrs = (short*)buf;
for (cnt = 0; cnt < count>>1; cnt++)
outw(*ptrs++, NE_BASE+NE_DATAPORT);
} else {
ptrc = (char*)buf;
for (cnt = 0; cnt < count; cnt++)
outb(*ptrc++, NE_BASE + NE_DATAPORT);
}
dma_start = jiffies;
while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) {
printk("%s: timeout waiting for Tx RDC.\n", dev->name);
apne_reset_8390(dev);
NS8390_init(dev,1);
break;
}
outb(ENISR_RDC, nic_base + NE_EN0_ISR);
ei_status.dmaing &= ~0x01;
return;
}
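/*
 * Editorial summary of the remote-DMA write sequence implemented above
 * (derived from this function, using the register names defined in this
 * driver): program the byte count into NE_EN0_RCNTLO/HI and the card-side
 * start address into NE_EN0_RSARLO/HI, issue E8390_RWRITE+E8390_START,
 * stream the packet through NE_DATAPORT (word-wide when word16 is set),
 * then poll NE_EN0_ISR for ENISR_RDC with a 2*HZ/100 (~20 ms) timeout
 * before acknowledging the bit.
 */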
static irqreturn_t apne_interrupt(int irq, void *dev_id)
{
unsigned char pcmcia_intreq;
if (!(gayle.inten & GAYLE_IRQ_IRQ))
return IRQ_NONE;
pcmcia_intreq = pcmcia_get_intreq();
if (!(pcmcia_intreq & GAYLE_IRQ_IRQ)) {
pcmcia_ack_int(pcmcia_intreq);
return IRQ_NONE;
}
if (ei_debug > 3)
printk("pcmcia intreq = %x\n", pcmcia_intreq);
pcmcia_disable_irq();
ei_interrupt(irq, dev_id);
pcmcia_ack_int(pcmcia_get_intreq());
pcmcia_enable_irq();
return IRQ_HANDLED;
}
#ifdef MODULE
static struct net_device *apne_dev;
static int __init apne_module_init(void)
{
apne_dev = apne_probe(-1);
if (IS_ERR(apne_dev))
return PTR_ERR(apne_dev);
return 0;
}
static void __exit apne_module_exit(void)
{
unregister_netdev(apne_dev);
pcmcia_disable_irq();
free_irq(IRQ_AMIGA_PORTS, apne_dev);
pcmcia_reset();
release_region(IOBASE, 0x20);
free_netdev(apne_dev);
}
module_init(apne_module_init);
module_exit(apne_module_exit);
#endif
static int init_pcmcia(void)
{
u_char config;
#ifndef MANUAL_CONFIG
u_char tuple[32];
int offset_len;
#endif
u_long offset;
pcmcia_reset();
pcmcia_program_voltage(PCMCIA_0V);
pcmcia_access_speed(PCMCIA_SPEED_250NS);
pcmcia_write_enable();
#ifdef MANUAL_CONFIG
config = MANUAL_CONFIG;
#else
if (pcmcia_copy_tuple(CISTPL_CFTABLE_ENTRY, tuple, 32) < 3)
return 0;
config = tuple[2] & 0x3f;
#endif
#ifdef MANUAL_OFFSET
offset = MANUAL_OFFSET;
#else
if (pcmcia_copy_tuple(CISTPL_CONFIG, tuple, 32) < 6)
return 0;
offset_len = (tuple[2] & 0x3) + 1;
offset = 0;
while(offset_len--) {
offset = (offset << 8) | tuple[4+offset_len];
}
#endif
out_8(GAYLE_ATTRIBUTE+offset, config);
return 1;
}
MODULE_LICENSE("GPL");
| leemgs/OptimusOneKernel-KandroidCommunity | drivers/net/apne.c | C | gpl-2.0 | 12,904 |
/**
* Copyright (C) ARM Limited 2012-2015. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#define NEWLINE_CANARY \
/* Unix */ \
"1\n" \
/* Windows */ \
"2\r\n" \
/* Mac OS */ \
"3\r" \
/* RISC OS */ \
"4\n\r" \
/* Add another character so the length isn't 0x0a bytes */ \
"5"
#ifdef MALI_SUPPORT
#include "gator_events_mali_common.h"
#endif
static void marshal_summary(long long timestamp, long long uptime, long long monotonic_delta, const char *uname)
{
unsigned long flags;
int cpu = 0;
char buf[32];
local_irq_save(flags);
gator_buffer_write_packed_int(cpu, SUMMARY_BUF, MESSAGE_SUMMARY);
gator_buffer_write_string(cpu, SUMMARY_BUF, NEWLINE_CANARY);
gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, timestamp);
gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, uptime);
gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, monotonic_delta);
gator_buffer_write_string(cpu, SUMMARY_BUF, "uname");
gator_buffer_write_string(cpu, SUMMARY_BUF, uname);
gator_buffer_write_string(cpu, SUMMARY_BUF, "PAGESIZE");
snprintf(buf, sizeof(buf), "%lu", PAGE_SIZE);
gator_buffer_write_string(cpu, SUMMARY_BUF, buf);
#if GATOR_IKS_SUPPORT
gator_buffer_write_string(cpu, SUMMARY_BUF, "iks");
gator_buffer_write_string(cpu, SUMMARY_BUF, "");
#endif
#ifdef CONFIG_PREEMPT_RTB
gator_buffer_write_string(cpu, SUMMARY_BUF, "preempt_rtb");
gator_buffer_write_string(cpu, SUMMARY_BUF, "");
#endif
#ifdef CONFIG_PREEMPT_RT_FULL
gator_buffer_write_string(cpu, SUMMARY_BUF, "preempt_rt_full");
gator_buffer_write_string(cpu, SUMMARY_BUF, "");
#endif
/* Let Streamline know which GPU is used so that it can label the GPU Activity appropriately. This is a temporary fix, to be improved in a future release. */
#ifdef MALI_SUPPORT
gator_buffer_write_string(cpu, SUMMARY_BUF, "mali_type");
#if (MALI_SUPPORT == MALI_4xx)
gator_buffer_write_string(cpu, SUMMARY_BUF, "4xx");
#elif (MALI_SUPPORT == MALI_MIDGARD)
gator_buffer_write_string(cpu, SUMMARY_BUF, "6xx");
#else
gator_buffer_write_string(cpu, SUMMARY_BUF, "unknown");
#endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
gator_buffer_write_string(cpu, SUMMARY_BUF, "nosync");
gator_buffer_write_string(cpu, SUMMARY_BUF, "");
#endif
gator_buffer_write_string(cpu, SUMMARY_BUF, "");
/* Commit the buffer now so it can be one of the first frames read by Streamline */
local_irq_restore(flags);
gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
}
static bool marshal_cookie_header(const char *text)
{
int cpu = get_physical_cpu();
return buffer_check_space(cpu, NAME_BUF, strlen(text) + 3 * MAXSIZE_PACK32);
}
static void marshal_cookie(int cookie, const char *text)
{
int cpu = get_physical_cpu();
/* buffer_check_space already called by marshal_cookie_header */
gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_COOKIE);
gator_buffer_write_packed_int(cpu, NAME_BUF, cookie);
gator_buffer_write_string(cpu, NAME_BUF, text);
buffer_check(cpu, NAME_BUF, gator_get_time());
}
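/*
 * Illustrative pairing of the two helpers above (a sketch, not copied
 * from a caller in this file): callers are expected to do
 *
 *	if (marshal_cookie_header(text))
 *		marshal_cookie(cookie, text);
 *
 * so that the space check performed by marshal_cookie_header() covers
 * the three writes done in marshal_cookie().
 */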
static void marshal_thread_name(int pid, char *name)
{
unsigned long flags, cpu;
u64 time;
local_irq_save(flags);
cpu = get_physical_cpu();
time = gator_get_time();
if (buffer_check_space(cpu, NAME_BUF, TASK_COMM_LEN + 3 * MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_THREAD_NAME);
gator_buffer_write_packed_int64(cpu, NAME_BUF, time);
gator_buffer_write_packed_int(cpu, NAME_BUF, pid);
gator_buffer_write_string(cpu, NAME_BUF, name);
}
local_irq_restore(flags);
buffer_check(cpu, NAME_BUF, time);
}
static void marshal_link(int cookie, int tgid, int pid)
{
unsigned long cpu = get_physical_cpu(), flags;
u64 time;
local_irq_save(flags);
time = gator_get_time();
if (buffer_check_space(cpu, ACTIVITY_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, MESSAGE_LINK);
gator_buffer_write_packed_int64(cpu, ACTIVITY_BUF, time);
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, cookie);
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, tgid);
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, pid);
}
local_irq_restore(flags);
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, ACTIVITY_BUF, time);
}
static bool marshal_backtrace_header(int exec_cookie, int tgid, int pid, u64 time)
{
int cpu = get_physical_cpu();
if (!buffer_check_space(cpu, BACKTRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32 + gator_backtrace_depth * 2 * MAXSIZE_PACK32)) {
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, BACKTRACE_BUF, time);
return false;
}
gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, time);
gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, exec_cookie);
gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, tgid);
gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, pid);
return true;
}
static void marshal_backtrace(unsigned long address, int cookie, int in_kernel)
{
int cpu = get_physical_cpu();
if (cookie == 0 && !in_kernel)
cookie = UNRESOLVED_COOKIE;
gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, cookie);
gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, address);
}
static void marshal_backtrace_footer(u64 time)
{
int cpu = get_physical_cpu();
gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, MESSAGE_END_BACKTRACE);
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, BACKTRACE_BUF, time);
}
static bool marshal_event_header(u64 time)
{
unsigned long flags, cpu = get_physical_cpu();
bool retval = false;
local_irq_save(flags);
if (buffer_check_space(cpu, BLOCK_COUNTER_BUF, MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, 0); /* key of zero indicates a timestamp */
gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, time);
retval = true;
}
local_irq_restore(flags);
return retval;
}
static void marshal_event(int len, int *buffer)
{
unsigned long i, flags, cpu = get_physical_cpu();
if (len <= 0)
return;
/* length must be even since all data is a (key, value) pair */
if (len & 0x1) {
pr_err("gator: invalid counter data detected and discarded\n");
return;
}
/* events must be written in key,value pairs */
local_irq_save(flags);
for (i = 0; i < len; i += 2) {
if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK32))
break;
gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[i]);
gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[i + 1]);
}
local_irq_restore(flags);
}
static void marshal_event64(int len, long long *buffer64)
{
unsigned long i, flags, cpu = get_physical_cpu();
if (len <= 0)
return;
/* length must be even since all data is a (key, value) pair */
if (len & 0x1) {
pr_err("gator: invalid counter data detected and discarded\n");
return;
}
/* events must be written in key,value pairs */
local_irq_save(flags);
for (i = 0; i < len; i += 2) {
if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK64))
break;
gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[i]);
gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[i + 1]);
}
local_irq_restore(flags);
}
static void __maybe_unused marshal_event_single(int core, int key, int value)
{
unsigned long flags, cpu;
u64 time;
local_irq_save(flags);
cpu = get_physical_cpu();
time = gator_get_time();
if (buffer_check_space(cpu, COUNTER_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
gator_buffer_write_packed_int64(cpu, COUNTER_BUF, time);
gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
gator_buffer_write_packed_int(cpu, COUNTER_BUF, value);
}
local_irq_restore(flags);
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, COUNTER_BUF, time);
}
static void __maybe_unused marshal_event_single64(int core, int key, long long value)
{
unsigned long flags, cpu;
u64 time;
local_irq_save(flags);
cpu = get_physical_cpu();
time = gator_get_time();
if (buffer_check_space(cpu, COUNTER_BUF, 2 * MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
gator_buffer_write_packed_int64(cpu, COUNTER_BUF, time);
gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
gator_buffer_write_packed_int64(cpu, COUNTER_BUF, value);
}
local_irq_restore(flags);
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, COUNTER_BUF, time);
}
static void marshal_sched_trace_switch(int pid, int state)
{
unsigned long cpu = get_physical_cpu(), flags;
u64 time;
if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
return;
local_irq_save(flags);
time = gator_get_time();
if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_SWITCH);
gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, time);
gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, state);
}
local_irq_restore(flags);
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, SCHED_TRACE_BUF, time);
}
static void marshal_sched_trace_exit(int tgid, int pid)
{
unsigned long cpu = get_physical_cpu(), flags;
u64 time;
if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
return;
local_irq_save(flags);
time = gator_get_time();
if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_EXIT);
gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, time);
gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
}
local_irq_restore(flags);
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, SCHED_TRACE_BUF, time);
}
#if GATOR_CPU_FREQ_SUPPORT
static void marshal_idle(int core, int state)
{
unsigned long flags, cpu;
u64 time;
local_irq_save(flags);
cpu = get_physical_cpu();
time = gator_get_time();
if (buffer_check_space(cpu, IDLE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
gator_buffer_write_packed_int(cpu, IDLE_BUF, state);
gator_buffer_write_packed_int64(cpu, IDLE_BUF, time);
gator_buffer_write_packed_int(cpu, IDLE_BUF, core);
}
local_irq_restore(flags);
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, IDLE_BUF, time);
}
#endif
#if defined(__arm__) || defined(__aarch64__)
static void marshal_core_name(const int core, const int cpuid, const char *name)
{
int cpu = get_physical_cpu();
unsigned long flags;
local_irq_save(flags);
if (buffer_check_space(cpu, SUMMARY_BUF, MAXSIZE_PACK32 + MAXSIZE_CORE_NAME)) {
gator_buffer_write_packed_int(cpu, SUMMARY_BUF, MESSAGE_CORE_NAME);
gator_buffer_write_packed_int(cpu, SUMMARY_BUF, core);
gator_buffer_write_packed_int(cpu, SUMMARY_BUF, cpuid);
gator_buffer_write_string(cpu, SUMMARY_BUF, name);
}
/* Commit core names now so that they can show up in live */
local_irq_restore(flags);
gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
}
#endif
static void marshal_activity_switch(int core, int key, int activity, int pid, int state)
{
unsigned long cpu = get_physical_cpu(), flags;
u64 time;
if (!per_cpu(gator_buffer, cpu)[ACTIVITY_BUF])
return;
local_irq_save(flags);
time = gator_get_time();
if (buffer_check_space(cpu, ACTIVITY_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, MESSAGE_SWITCH);
gator_buffer_write_packed_int64(cpu, ACTIVITY_BUF, time);
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, core);
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, key);
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, activity);
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, pid);
gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, state);
}
local_irq_restore(flags);
/* Check and commit; commit is set to occur once buffer is 3/4 full */
buffer_check(cpu, ACTIVITY_BUF, time);
}
void gator_marshal_activity_switch(int core, int key, int activity, int pid)
{
/* state is reserved for cpu use only */
marshal_activity_switch(core, key, activity, pid, 0);
}
| javilonas/Lonas_KL-SM-G901F | drivers/gator/gator_marshaling.c | C | gpl-2.0 | 12,460 |
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <graalvm/llvm/polyglot.h>
#include <graalvm/llvm/handles.h>
int main() {
void *p = polyglot_import("object");
void *p1 = create_handle(p);
void *p2 = resolve_handle(p1);
if (p != p2) {
return 1;
}
return 0;
}
| smarr/Truffle | sulong/tests/com.oracle.truffle.llvm.tests.interop.native/interop/createResolveHandle.c | C | gpl-2.0 | 1,844 |
/*
** Copyright (C) 1991, 1997 Free Software Foundation, Inc.
**
** This file is part of TACK.
**
** TACK is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2, or (at your option)
** any later version.
**
** TACK is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with TACK; see the file COPYING. If not, write to
** the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
** Boston, MA 02111-1307, USA.
*/
#include <tack.h>
MODULE_ID("$Id: menu.c,v 1.1.1.1 2004/03/24 19:53:07 sure Exp $")
/*
Menu control
*/
static void test_byname(struct test_menu *, int *, int *);
struct test_list *augment_test;
char prompt_string[80]; /* menu prompt storage */
/*
** menu_prompt()
**
** Print the menu prompt string.
*/
void
menu_prompt(void)
{
ptext(&prompt_string[1]);
}
/*
** menu_test_loop(test-structure, state, control-character)
**
** This function implements the repeat test function.
*/
static void
menu_test_loop(
struct test_list *test,
int *state,
int *ch)
{
int nch, p;
if ((test->flags & MENU_REP_MASK) && (augment_test != test)) {
/* set the augment variable (first time only) */
p = (test->flags >> 8) & 15;
if ((test->flags & MENU_REP_MASK) == MENU_LM1) {
augment = lines - 1;
} else
if ((test->flags & MENU_ONE_MASK) == MENU_ONE) {
augment = 1;
} else
if ((test->flags & MENU_LC_MASK) == MENU_lines) {
augment = lines * p / 10;
} else
if ((test->flags & MENU_LC_MASK) == MENU_columns) {
augment = columns * p / 10;
} else {
augment = 1;
}
augment_test = test;
set_augment_txt();
}
do {
if ((test->flags | *state) & MENU_CLEAR) {
put_clear();
} else
if (line_count + test->lines_needed >= lines) {
put_clear();
}
nch = 0;
if (test->test_procedure) {
/* The procedure takes precedence so I can pass
the menu entry as an argument.
*/
can_test(test->caps_done, FLAG_TESTED);
can_test(test->caps_tested, FLAG_TESTED);
test->test_procedure(test, state, &nch);
} else
if (test->sub_menu) {
/* nested menu's */
menu_display(test->sub_menu, &nch);
*state = 0;
if (nch == 'q' || nch == 's') {
/* Quit and skip are killed here */
nch = '?';
}
} else {
break; /* cya */
}
if (nch == '\r' || nch == '\n' || nch == 'n') {
nch = 0;
break;
}
} while (nch == 'r');
*ch = nch;
}
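/*
**	Worked example of the augment encoding handled above (derived from
**	this function, not from separate documentation): a test whose flags
**	carry MENU_lines with a repeat factor of 5 in bits 8-11 gets
**	augment = lines * 5 / 10, i.e. half the screen height; MENU_LM1
**	always yields lines - 1, and MENU_ONE yields 1.
*/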
/*
** menu_display(menu-structure, flags)
**
** This function implements menu control.
*/
void
menu_display(
struct test_menu *menu,
int *last_ch)
{
int test_state = 0, run_standard_tests;
int hot_topic, ch = 0, nch = 0;
struct test_list *mt;
struct test_list *repeat_tests = 0;
int repeat_state = 0;
int prompt_length;
prompt_length = strlen(prompt_string);
if (menu->ident) {
sprintf(&prompt_string[prompt_length], "/%s", menu->ident);
}
hot_topic = menu->default_action;
run_standard_tests = menu->standard_tests ?
menu->standard_tests[0] : -1;
if (!last_ch) {
last_ch = &ch;
}
while (1) {
if (ch == 0) {
/* Display the menu */
put_crlf();
if (menu->menu_function) {
/*
this function may be used to restrict menu
entries. If used it must print the title.
*/
menu->menu_function(menu);
} else
if (menu->menu_title) {
ptextln(menu->menu_title);
}
for (mt = menu->tests; (mt->flags & MENU_LAST) == 0; mt++) {
if (mt->menu_entry) {
ptext(" ");
ptextln(mt->menu_entry);
}
}
if (menu->standard_tests) {
ptext(" ");
ptextln(menu->standard_tests);
ptextln(" r) repeat test");
ptextln(" s) skip to next test");
}
ptextln(" q) quit");
ptextln(" ?) help");
}
if (ch == 0 || ch == REQUEST_PROMPT) {
put_crlf();
ptext(&prompt_string[1]);
if (hot_topic) {
ptext(" [");
putchp(hot_topic);
ptext("]");
}
ptext(" > ");
/* read a character */
ch = wait_here();
}
if (ch == '\r' || ch == '\n') {
ch = hot_topic;
}
if (ch == 'q') {
break;
}
if (ch == '?') {
ch = 0;
continue;
}
nch = ch;
ch = 0;
/* Run one of the standard tests (by request) */
for (mt = menu->tests; (mt->flags & MENU_LAST) == 0; mt++) {
if (mt->menu_entry && (nch == mt->menu_entry[0])) {
if (mt->flags & MENU_MENU) {
test_byname(menu, &test_state, &nch);
} else {
menu_test_loop(mt, &test_state, &nch);
}
ch = nch;
if ((mt->flags & MENU_COMPLETE) && ch == 0) {
/* top level */
hot_topic = 'q';
ch = '?';
}
}
}
if (menu->standard_tests && nch == 'r') {
menu->resume_tests = repeat_tests;
test_state = repeat_state;
nch = run_standard_tests;
}
if (nch == run_standard_tests) {
if (!(mt = menu->resume_tests)) {
mt = menu->tests;
}
if (mt->flags & MENU_LAST) {
mt = menu->tests;
}
/* Run the standard test suite */
for ( ; (mt->flags & MENU_LAST) == 0; ) {
if ((mt->flags & MENU_NEXT) == MENU_NEXT) {
repeat_tests = mt;
repeat_state = test_state;
nch = run_standard_tests;
menu_test_loop(mt, &test_state, &nch);
if (nch != 0 && nch != 'n') {
ch = nch;
break;
}
if (test_state & MENU_STOP) {
break;
}
}
mt++;
}
if (ch == 0) {
ch = hot_topic;
}
menu->resume_tests = mt;
menu->resume_state = test_state;
menu->resume_char = ch;
if (ch == run_standard_tests) {
/* pop up a level */
break;
}
}
}
*last_ch = ch;
prompt_string[prompt_length] = '\0';
}
/*
** generic_done_message(test_list)
**
** Print the Done message and request input.
*/
void
generic_done_message(
struct test_list *test,
int *state,
int *ch)
{
char done_message[128];
if (test->caps_done) {
sprintf(done_message, "(%s) Done ", test->caps_done);
ptext(done_message);
} else {
ptext("Done ");
}
*ch = wait_here();
if (*ch == '\r' || *ch == '\n' || *ch == 'n') {
*ch = 0;
}
if (*ch == 's') {
*state |= MENU_STOP;
*ch = 0;
}
}
/*
** menu_clear_screen(test, state, ch)
**
** Just clear the screen.
*/
void
menu_clear_screen(
struct test_list *test GCC_UNUSED,
int *state GCC_UNUSED,
int *ch GCC_UNUSED)
{
put_clear();
}
/*
** menu_reset_init(test, state, ch)
**
** Send the reset and init strings.
*/
void
menu_reset_init(
struct test_list *test GCC_UNUSED,
int *state GCC_UNUSED,
int *ch GCC_UNUSED)
{
reset_init();
put_crlf();
}
/*
** subtest_menu(test, state, ch)
**
** Scan the menu looking for something to execute
** Return TRUE if we found anything.
*/
int
subtest_menu(
struct test_list *test,
int *state,
int *ch)
{
struct test_list *mt;
if (*ch) {
for (mt = test; (mt->flags & MENU_LAST) == 0; mt++) {
if (mt->menu_entry && (*ch == mt->menu_entry[0])) {
*ch = 0;
menu_test_loop(mt, state, ch);
return TRUE;
}
}
}
return FALSE;
}
/*
** menu_can_scan(menu-structure)
**
** Recursively scan the menu tree and find which cap names can be tested.
*/
void
menu_can_scan(
const struct test_menu *menu)
{
struct test_list *mt;
for (mt = menu->tests; (mt->flags & MENU_LAST) == 0; mt++) {
can_test(mt->caps_done, FLAG_CAN_TEST);
can_test(mt->caps_tested, FLAG_CAN_TEST);
if (!(mt->test_procedure)) {
if (mt->sub_menu) {
menu_can_scan(mt->sub_menu);
}
}
}
}
/*
** menu_search(menu-structure, cap)
**
** Recursively search the menu tree and execute any tests that use cap.
*/
static void
menu_search(
struct test_menu *menu,
int *state,
int *ch,
char *cap)
{
struct test_list *mt;
int nch;
for (mt = menu->tests; (mt->flags & MENU_LAST) == 0; mt++) {
nch = 0;
if (cap_match(mt->caps_done, cap)
|| cap_match(mt->caps_tested, cap)) {
menu_test_loop(mt, state, &nch);
}
if (!(mt->test_procedure)) {
if (mt->sub_menu) {
menu_search(mt->sub_menu, state, &nch, cap);
}
}
if (*state & MENU_STOP) {
break;
}
if (nch != 0 && nch != 'n') {
*ch = nch;
break;
}
}
}
/*
** test_byname(menu, state, ch)
**
** Get a cap name then run all tests that use that cap.
*/
static void
test_byname(
struct test_menu *menu,
int *state GCC_UNUSED,
int *ch)
{
int test_state = 0;
char cap[32];
if (tty_can_sync == SYNC_NOT_TESTED) {
verify_time();
}
ptext("enter name: ");
read_string(cap, sizeof(cap));
if (cap[0]) {
menu_search(menu, &test_state, ch, cap);
}
*ch = '?';
}
| nslu2/glibc | ncurses/tack/menu.c | C | gpl-2.0 | 8,641 |
/*
* linux/fs/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* Some corrections by tytso.
*/
/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
* lookup logic.
*/
/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <linux/hash.h>
#include <asm/uaccess.h>
#include "internal.h"
#include "mount.h"
/* [Feb-1997 T. Schoebel-Theuer]
* Fundamental changes in the pathname lookup mechanisms (namei)
* were necessary because of omirr. The reason is that omirr needs
* to know the _real_ pathname, not the user-supplied one, in case
* of symlinks (and also when transname replacements occur).
*
* The new code replaces the old recursive symlink resolution with
* an iterative one (in case of non-nested symlink chains). It does
* this with calls to <fs>_follow_link().
* As a side effect, dir_namei(), _namei() and follow_link() are now
* replaced with a single function lookup_dentry() that can handle all
* the special cases of the former code.
*
* With the new dcache, the pathname is stored at each inode, at least as
* long as the refcount of the inode is positive. As a side effect, the
* size of the dcache depends on the inode cache and thus is dynamic.
*
* [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
* resolution to correspond with current state of the code.
*
* Note that the symlink resolution is not *completely* iterative.
* There is still a significant amount of tail- and mid- recursion in
* the algorithm. Also, note that <fs>_readlink() is not used in
* lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
* may return different results than <fs>_follow_link(). Many virtual
* filesystems (including /proc) exhibit this behavior.
*/
/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
* New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
* and the name already exists in form of a symlink, try to create the new
* name indicated by the symlink. The old code always complained that the
* name already exists, due to not following the symlink even if its target
* is nonexistent. The new semantics affects also mknod() and link() when
* the name is a symlink pointing to a non-existent name.
*
* I don't know which semantics is the right one, since I have no access
* to standards. But I found by trial that HP-UX 9.0 has the full "new"
* semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
* "old" one. Personally, I think the new semantics is much more logical.
* Note that "ln old new" where "new" is a symlink pointing to a non-existing
* file does succeed in both HP-UX and SunOs, but not in Solaris
* and in the old Linux semantics.
*/
/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
* semantics. See the comments in "open_namei" and "do_link" below.
*
* [10-Sep-98 Alan Modra] Another symlink change.
*/
/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
* inside the path - always follow.
* in the last component in creation/removal/renaming - never follow.
* if LOOKUP_FOLLOW passed - follow.
* if the pathname has trailing slashes - follow.
* otherwise - don't follow.
* (applied in that order).
*
* [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
* restored for 2.4. This is the last surviving part of old 4.2BSD bug.
* During the 2.4 we need to fix the userland stuff depending on it -
* hopefully we will be able to get rid of that wart in 2.5. So far only
* XEmacs seems to be relying on it...
*/
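/* A small userspace illustration of the rules above (standard POSIX
 * behaviour, included only as an example):
 *
 *	symlink("target", "link");
 *	unlink("link");			// removes the symlink itself, never "target"
 *	open("link", O_RDONLY);		// follows the link to "target"
 *	stat("link/", &st);		// trailing slash forces following (target must be a dir)
 *
 * i.e. creation/removal of the last component never follow, ordinary
 * opens and trailing slashes do.
 */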
/*
* [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
* implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
* any extra contention...
*/
/* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
* kernel data space before using them..
*
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
* PATH_MAX includes the nul terminator --RR.
*/
static int do_getname(const char __user *filename, char *page)
{
int retval;
unsigned long len = PATH_MAX;
if (!segment_eq(get_fs(), KERNEL_DS)) {
if ((unsigned long) filename >= TASK_SIZE)
return -EFAULT;
if (TASK_SIZE - (unsigned long) filename < PATH_MAX)
len = TASK_SIZE - (unsigned long) filename;
}
retval = strncpy_from_user(page, filename, len);
if (retval > 0) {
if (retval < len)
return 0;
return -ENAMETOOLONG;
} else if (!retval)
retval = -ENOENT;
return retval;
}
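/*
 * Example of the PATH_MAX handling above (ordinary userspace behaviour,
 * given for illustration only): a pathname of PATH_MAX or more bytes
 * before its terminating NUL cannot fit in the __getname() page, so
 * strncpy_from_user() returns len and the syscall (e.g. open(2)) fails
 * with -ENAMETOOLONG before any lookup is attempted.
 */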
static char *getname_flags(const char __user *filename, int flags, int *empty)
{
char *result = __getname();
int retval;
if (!result)
return ERR_PTR(-ENOMEM);
retval = do_getname(filename, result);
if (retval < 0) {
if (retval == -ENOENT && empty)
*empty = 1;
if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
__putname(result);
return ERR_PTR(retval);
}
}
audit_getname(result);
return result;
}
char *getname(const char __user * filename)
{
return getname_flags(filename, 0, NULL);
}
#ifdef CONFIG_AUDITSYSCALL
void putname(const char *name)
{
if (unlikely(!audit_dummy_context()))
audit_putname(name);
else
__putname(name);
}
EXPORT_SYMBOL(putname);
#endif
static int check_acl(struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *acl;
if (mask & MAY_NOT_BLOCK) {
acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
if (!acl)
return -EAGAIN;
/* no ->get_acl() calls in RCU mode... */
if (acl == ACL_NOT_CACHED)
return -ECHILD;
return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
}
acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
/*
	 * A filesystem can force an ACL callback by just never filling the
* ACL cache. But normally you'd fill the cache either at inode
* instantiation time, or on the first ->get_acl call.
*
* If the filesystem doesn't have a get_acl() function at all, we'll
* just create the negative cache entry.
*/
if (acl == ACL_NOT_CACHED) {
if (inode->i_op->get_acl) {
acl = inode->i_op->get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
} else {
set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
return -EAGAIN;
}
}
if (acl) {
int error = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
return error;
}
#endif
return -EAGAIN;
}
/*
* This does the basic permission checking
*/
static int acl_permission_check(struct inode *inode, int mask)
{
unsigned int mode = inode->i_mode;
if (current_user_ns() != inode_userns(inode))
goto other_perms;
if (likely(current_fsuid() == inode->i_uid))
mode >>= 6;
else {
if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
int error = check_acl(inode, mask);
if (error != -EAGAIN)
return error;
}
if (in_group_p(inode->i_gid))
mode >>= 3;
}
other_perms:
/*
* If the DACs are ok we don't need any capability check.
*/
if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
return 0;
return -EACCES;
}
/**
* generic_permission - check for access rights on a Posix-like filesystem
* @inode: inode to check access rights for
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
*
* Used to check for read/write/execute permissions on a file.
* We use "fsuid" for this, letting us set arbitrary permissions
* for filesystem access without changing the "normal" uids which
* are used for other things.
*
* generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
* request cannot be satisfied (eg. requires blocking or too much complexity).
* It would then be called again in ref-walk mode.
*/
int generic_permission(struct inode *inode, int mask)
{
int ret;
/*
* Do the basic permission checks.
*/
ret = acl_permission_check(inode, mask);
if (ret != -EACCES)
return ret;
if (S_ISDIR(inode->i_mode)) {
/* DACs are overridable for directories */
if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
return 0;
if (!(mask & MAY_WRITE))
if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
return 0;
return -EACCES;
}
/*
* Read/write DACs are always overridable.
* Executable DACs are overridable when there is
* at least one exec bit set.
*/
if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
return 0;
/*
* Searching includes executable on directories, else just read.
*/
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ)
if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
return 0;
return -EACCES;
}
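/*
 * Worked example of the checks above: a regular 0644 file owned by
 * another user, with the caller not in the file's group, opened for
 * writing. acl_permission_check() ends up testing the "other" bits
 * (r--), so MAY_WRITE yields -EACCES; generic_permission() then only
 * grants access if the caller has CAP_DAC_OVERRIDE in the inode's user
 * namespace, since read/write DACs are always overridable.
 */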
/*
* We _really_ want to just do "generic_permission()" without
* even looking at the inode->i_op values. So we keep a cache
 * flag in inode->i_opflags that says "this has no special
* permission function, use the fast case".
*/
static inline int do_inode_permission(struct inode *inode, int mask)
{
if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
if (likely(inode->i_op->permission))
return inode->i_op->permission(inode, mask);
/* This gets set once for the inode lifetime */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_FASTPERM;
spin_unlock(&inode->i_lock);
}
return generic_permission(inode, mask);
}
/**
* inode_permission - check for access rights to a given inode
* @inode: inode to check permission on
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
*
* Used to check for read/write/execute permissions on an inode.
* We use "fsuid" for this, letting us set arbitrary permissions
* for filesystem access without changing the "normal" uids which
* are used for other things.
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*/
int inode_permission(struct inode *inode, int mask)
{
int retval;
if (unlikely(mask & MAY_WRITE)) {
umode_t mode = inode->i_mode;
/*
* Nobody gets write access to a read-only fs.
*/
if (IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
/*
* Nobody gets write access to an immutable file.
*/
if (IS_IMMUTABLE(inode))
return -EACCES;
}
retval = do_inode_permission(inode, mask);
if (retval)
return retval;
retval = devcgroup_inode_permission(inode, mask);
if (retval)
return retval;
return security_inode_permission(inode, mask);
}
/**
* path_get - get a reference to a path
* @path: path to get the reference to
*
* Given a path increment the reference count to the dentry and the vfsmount.
*/
void path_get(struct path *path)
{
mntget(path->mnt);
dget(path->dentry);
}
EXPORT_SYMBOL(path_get);
/**
* path_put - put a reference to a path
* @path: path to put the reference to
*
* Given a path decrement the reference count to the dentry and the vfsmount.
*/
void path_put(struct path *path)
{
dput(path->dentry);
mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
* continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
* normal reference counts on dentries and vfsmounts to transition to rcu-walk
* mode. Refcounts are grabbed at the last known good point before rcu-walk
* got stuck, so ref-walk may continue from there. If this is not successful
* (eg. a seqcount has changed), then failure is returned and it's up to caller
* to restart the path walk from the beginning in ref-walk mode.
*/
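/*
 * In practice (a summary of the scheme described above, not a separate
 * code path): a walk is first attempted with LOOKUP_RCU set; whenever a
 * step returns -ECHILD and unlazy_walk() cannot legitimize the current
 * position, the caller restarts the entire walk from the beginning in
 * ref-walk mode, taking real dentry and vfsmount references as it goes.
 */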
/**
* unlazy_walk - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
* @dentry: child of nd->path.dentry or NULL
* Returns: 0 on success, -ECHILD on failure
*
* unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
* for ref-walk mode. @dentry must be a path found by a do_lookup call on
* @nd or NULL. Must be called from rcu-walk context.
*/
static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
{
struct fs_struct *fs = current->fs;
struct dentry *parent = nd->path.dentry;
int want_root = 0;
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
want_root = 1;
spin_lock(&fs->lock);
if (nd->root.mnt != fs->root.mnt ||
nd->root.dentry != fs->root.dentry)
goto err_root;
}
spin_lock(&parent->d_lock);
if (!dentry) {
if (!__d_rcu_to_refcount(parent, nd->seq))
goto err_parent;
BUG_ON(nd->inode != parent->d_inode);
} else {
if (dentry->d_parent != parent)
goto err_parent;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
if (!__d_rcu_to_refcount(dentry, nd->seq))
goto err_child;
/*
* If the sequence check on the child dentry passed, then
* the child has not been removed from its parent. This
* means the parent dentry must be valid and able to take
* a reference at this point.
*/
BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
BUG_ON(!parent->d_count);
parent->d_count++;
spin_unlock(&dentry->d_lock);
}
spin_unlock(&parent->d_lock);
if (want_root) {
path_get(&nd->root);
spin_unlock(&fs->lock);
}
mntget(nd->path.mnt);
rcu_read_unlock();
br_read_unlock(&vfsmount_lock);
nd->flags &= ~LOOKUP_RCU;
return 0;
err_child:
spin_unlock(&dentry->d_lock);
err_parent:
spin_unlock(&parent->d_lock);
err_root:
if (want_root)
spin_unlock(&fs->lock);
return -ECHILD;
}
/**
* release_open_intent - free up open intent resources
* @nd: pointer to nameidata
*/
void release_open_intent(struct nameidata *nd)
{
struct file *file = nd->intent.open.file;
if (file && !IS_ERR(file)) {
if (file->f_path.dentry == NULL)
put_filp(file);
else
fput(file);
}
}
static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
return dentry->d_op->d_revalidate(dentry, nd);
}
/**
* complete_walk - successful completion of path walk
* @nd: pointer nameidata
*
* If we had been in RCU mode, drop out of it and legitimize nd->path.
* Revalidate the final result, unless we'd already done that during
* the path walk or the filesystem doesn't ask for it. Return 0 on
* success, -error on failure. In case of failure caller does not
* need to drop nd->path.
*/
static int complete_walk(struct nameidata *nd)
{
struct dentry *dentry = nd->path.dentry;
int status;
if (nd->flags & LOOKUP_RCU) {
nd->flags &= ~LOOKUP_RCU;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
spin_lock(&dentry->d_lock);
if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
spin_unlock(&dentry->d_lock);
rcu_read_unlock();
br_read_unlock(&vfsmount_lock);
return -ECHILD;
}
BUG_ON(nd->inode != dentry->d_inode);
spin_unlock(&dentry->d_lock);
mntget(nd->path.mnt);
rcu_read_unlock();
br_read_unlock(&vfsmount_lock);
}
if (likely(!(nd->flags & LOOKUP_JUMPED)))
return 0;
if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
return 0;
if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
return 0;
/* Note: we do not d_invalidate() */
status = d_revalidate(dentry, nd);
if (status > 0)
return 0;
if (!status)
status = -ESTALE;
path_put(&nd->path);
return status;
}
static __always_inline void set_root(struct nameidata *nd)
{
if (!nd->root.mnt)
get_fs_root(current->fs, &nd->root);
}
static int link_path_walk(const char *, struct nameidata *);
static __always_inline void set_root_rcu(struct nameidata *nd)
{
if (!nd->root.mnt) {
struct fs_struct *fs = current->fs;
unsigned seq;
do {
seq = read_seqcount_begin(&fs->seq);
nd->root = fs->root;
nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
} while (read_seqcount_retry(&fs->seq, seq));
}
}
static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
{
int ret;
if (IS_ERR(link))
goto fail;
if (*link == '/') {
set_root(nd);
path_put(&nd->path);
nd->path = nd->root;
path_get(&nd->root);
nd->flags |= LOOKUP_JUMPED;
}
nd->inode = nd->path.dentry->d_inode;
ret = link_path_walk(link, nd);
return ret;
fail:
path_put(&nd->path);
return PTR_ERR(link);
}
static void path_put_conditional(struct path *path, struct nameidata *nd)
{
dput(path->dentry);
if (path->mnt != nd->path.mnt)
mntput(path->mnt);
}
static inline void path_to_nameidata(const struct path *path,
struct nameidata *nd)
{
if (!(nd->flags & LOOKUP_RCU)) {
dput(nd->path.dentry);
if (nd->path.mnt != path->mnt)
mntput(nd->path.mnt);
}
nd->path.mnt = path->mnt;
nd->path.dentry = path->dentry;
}
static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
{
struct inode *inode = link->dentry->d_inode;
if (!IS_ERR(cookie) && inode->i_op->put_link)
inode->i_op->put_link(link->dentry, nd, cookie);
path_put(link);
}
static __always_inline int
follow_link(struct path *link, struct nameidata *nd, void **p)
{
int error;
struct dentry *dentry = link->dentry;
BUG_ON(nd->flags & LOOKUP_RCU);
if (link->mnt == nd->path.mnt)
mntget(link->mnt);
if (unlikely(current->total_link_count >= 40)) {
*p = ERR_PTR(-ELOOP); /* no ->put_link(), please */
path_put(&nd->path);
return -ELOOP;
}
cond_resched();
current->total_link_count++;
touch_atime(link);
nd_set_link(nd, NULL);
error = security_inode_follow_link(link->dentry, nd);
if (error) {
*p = ERR_PTR(error); /* no ->put_link(), please */
path_put(&nd->path);
return error;
}
nd->last_type = LAST_BIND;
*p = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(*p);
if (!IS_ERR(*p)) {
char *s = nd_get_link(nd);
error = 0;
if (s)
error = __vfs_follow_link(nd, s);
else if (nd->last_type == LAST_BIND) {
nd->flags |= LOOKUP_JUMPED;
nd->inode = nd->path.dentry->d_inode;
if (nd->inode->i_op->follow_link) {
/* stepped on a _really_ weird one */
path_put(&nd->path);
error = -ELOOP;
}
}
}
return error;
}
static int follow_up_rcu(struct path *path)
{
struct mount *mnt = real_mount(path->mnt);
struct mount *parent;
struct dentry *mountpoint;
parent = mnt->mnt_parent;
if (&parent->mnt == path->mnt)
return 0;
mountpoint = mnt->mnt_mountpoint;
path->dentry = mountpoint;
path->mnt = &parent->mnt;
return 1;
}
/*
* follow_up - Find the mountpoint of path's vfsmount
*
* Given a path, find the mountpoint of its source file system.
* Replace @path with the path of the mountpoint in the parent mount.
* Up is towards /.
*
* Return 1 if we went up a level and 0 if we were already at the
* root.
*/
int follow_up(struct path *path)
{
struct mount *mnt = real_mount(path->mnt);
struct mount *parent;
struct dentry *mountpoint;
br_read_lock(&vfsmount_lock);
parent = mnt->mnt_parent;
if (&parent->mnt == path->mnt) {
br_read_unlock(&vfsmount_lock);
return 0;
}
mntget(&parent->mnt);
mountpoint = dget(mnt->mnt_mountpoint);
br_read_unlock(&vfsmount_lock);
dput(path->dentry);
path->dentry = mountpoint;
mntput(path->mnt);
path->mnt = &parent->mnt;
return 1;
}
/*
* Perform an automount
* - return -EISDIR to tell follow_managed() to stop and return the path we
* were called with.
*/
static int follow_automount(struct path *path, unsigned flags,
bool *need_mntput)
{
struct vfsmount *mnt;
int err;
if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
return -EREMOTE;
/* We don't want to mount if someone's just doing a stat -
* unless they're stat'ing a directory and appended a '/' to
* the name.
*
* We do, however, want to mount if someone wants to open or
* create a file of any type under the mountpoint, wants to
* traverse through the mountpoint or wants to open the
* mounted directory. Also, autofs may mark negative dentries
* as being automount points. These will need the attentions
* of the daemon to instantiate them before they can be used.
*/
if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
path->dentry->d_inode)
return -EISDIR;
current->total_link_count++;
if (current->total_link_count >= 40)
return -ELOOP;
mnt = path->dentry->d_op->d_automount(path);
if (IS_ERR(mnt)) {
/*
* The filesystem is allowed to return -EISDIR here to indicate
* it doesn't want to automount. For instance, autofs would do
* this so that its userspace daemon can mount on this dentry.
*
* However, we can only permit this if it's a terminal point in
* the path being looked up; if it wasn't then the remainder of
* the path is inaccessible and we should say so.
*/
if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
return -EREMOTE;
return PTR_ERR(mnt);
}
if (!mnt) /* mount collision */
return 0;
if (!*need_mntput) {
/* lock_mount() may release path->mnt on error */
mntget(path->mnt);
*need_mntput = true;
}
err = finish_automount(mnt, path);
switch (err) {
case -EBUSY:
/* Someone else made a mount here whilst we were busy */
return 0;
case 0:
path_put(path);
path->mnt = mnt;
path->dentry = dget(mnt->mnt_root);
return 0;
default:
return err;
}
}
/*
* Handle a dentry that is managed in some way.
* - Flagged for transit management (autofs)
* - Flagged as mountpoint
* - Flagged as automount point
*
* This may only be called in refwalk mode.
*
* Serialization is taken care of in namespace.c
*/
static int follow_managed(struct path *path, unsigned flags)
{
struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
unsigned managed;
bool need_mntput = false;
int ret = 0;
/* Given that we're not holding a lock here, we retain the value in a
* local variable for each dentry as we look at it so that we don't see
* the components of that value change under us */
while (managed = ACCESS_ONCE(path->dentry->d_flags),
managed &= DCACHE_MANAGED_DENTRY,
unlikely(managed != 0)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held. */
if (managed & DCACHE_MANAGE_TRANSIT) {
BUG_ON(!path->dentry->d_op);
BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(path->dentry, false);
if (ret < 0)
break;
}
/* Transit to a mounted filesystem. */
if (managed & DCACHE_MOUNTED) {
struct vfsmount *mounted = lookup_mnt(path);
if (mounted) {
dput(path->dentry);
if (need_mntput)
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
need_mntput = true;
continue;
}
/* Something is mounted on this dentry in another
* namespace and/or whatever was mounted there in this
* namespace got unmounted before we managed to get the
* vfsmount_lock */
}
/* Handle an automount point */
if (managed & DCACHE_NEED_AUTOMOUNT) {
ret = follow_automount(path, flags, &need_mntput);
if (ret < 0)
break;
continue;
}
/* We didn't change the current path point */
break;
}
if (need_mntput && path->mnt == mnt)
mntput(path->mnt);
if (ret == -EISDIR)
ret = 0;
return ret < 0 ? ret : need_mntput;
}
int follow_down_one(struct path *path)
{
struct vfsmount *mounted;
mounted = lookup_mnt(path);
if (mounted) {
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
return 1;
}
return 0;
}
static inline bool managed_dentry_might_block(struct dentry *dentry)
{
return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
dentry->d_op->d_manage(dentry, true) < 0);
}
/*
* Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
* we meet a managed dentry that would need blocking.
*/
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
struct inode **inode)
{
for (;;) {
struct mount *mounted;
/*
* Don't forget we might have a non-mountpoint managed dentry
* that wants to block transit.
*/
if (unlikely(managed_dentry_might_block(path->dentry)))
return false;
if (!d_mountpoint(path->dentry))
break;
mounted = __lookup_mnt(path->mnt, path->dentry, 1);
if (!mounted)
break;
path->mnt = &mounted->mnt;
path->dentry = mounted->mnt.mnt_root;
nd->flags |= LOOKUP_JUMPED;
nd->seq = read_seqcount_begin(&path->dentry->d_seq);
/*
* Update the inode too. We don't need to re-check the
* dentry sequence number here after this d_inode read,
* because a mount-point is always pinned.
*/
*inode = path->dentry->d_inode;
}
return true;
}
static void follow_mount_rcu(struct nameidata *nd)
{
while (d_mountpoint(nd->path.dentry)) {
struct mount *mounted;
mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
if (!mounted)
break;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
}
static int follow_dotdot_rcu(struct nameidata *nd)
{
set_root_rcu(nd);
while (1) {
if (nd->path.dentry == nd->root.dentry &&
nd->path.mnt == nd->root.mnt) {
break;
}
if (nd->path.dentry != nd->path.mnt->mnt_root) {
struct dentry *old = nd->path.dentry;
struct dentry *parent = old->d_parent;
unsigned seq;
seq = read_seqcount_begin(&parent->d_seq);
if (read_seqcount_retry(&old->d_seq, nd->seq))
goto failed;
nd->path.dentry = parent;
nd->seq = seq;
break;
}
if (!follow_up_rcu(&nd->path))
break;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
follow_mount_rcu(nd);
nd->inode = nd->path.dentry->d_inode;
return 0;
failed:
nd->flags &= ~LOOKUP_RCU;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
br_read_unlock(&vfsmount_lock);
return -ECHILD;
}
/*
* Follow down to the covering mount currently visible to userspace. At each
* point, the filesystem owning that dentry may be queried as to whether the
* caller is permitted to proceed or not.
*/
int follow_down(struct path *path)
{
unsigned managed;
int ret;
while (managed = ACCESS_ONCE(path->dentry->d_flags),
unlikely(managed & DCACHE_MANAGED_DENTRY)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held.
*
* We indicate to the filesystem if someone is trying to mount
* something here. This gives autofs the chance to deny anyone
* other than its daemon the right to mount on its
* superstructure.
*
* The filesystem may sleep at this point.
*/
if (managed & DCACHE_MANAGE_TRANSIT) {
BUG_ON(!path->dentry->d_op);
BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(
path->dentry, false);
if (ret < 0)
return ret == -EISDIR ? 0 : ret;
}
/* Transit to a mounted filesystem. */
if (managed & DCACHE_MOUNTED) {
struct vfsmount *mounted = lookup_mnt(path);
if (!mounted)
break;
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
continue;
}
/* Don't handle automount points here */
break;
}
return 0;
}
/*
* Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
*/
static void follow_mount(struct path *path)
{
while (d_mountpoint(path->dentry)) {
struct vfsmount *mounted = lookup_mnt(path);
if (!mounted)
break;
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
}
}
static void follow_dotdot(struct nameidata *nd)
{
set_root(nd);
while(1) {
struct dentry *old = nd->path.dentry;
if (nd->path.dentry == nd->root.dentry &&
nd->path.mnt == nd->root.mnt) {
break;
}
if (nd->path.dentry != nd->path.mnt->mnt_root) {
/* rare case of legitimate dget_parent()... */
nd->path.dentry = dget_parent(nd->path.dentry);
dput(old);
break;
}
if (!follow_up(&nd->path))
break;
}
follow_mount(&nd->path);
nd->inode = nd->path.dentry->d_inode;
}
/*
* This looks up the name in dcache, possibly revalidates the old dentry and
 * allocates a new one if not found or not valid. Whether i_op->lookup is
 * necessary is returned in the need_lookup argument.
*
* dir->d_inode->i_mutex must be held
*/
static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
struct nameidata *nd, bool *need_lookup)
{
struct dentry *dentry;
int error;
*need_lookup = false;
dentry = d_lookup(dir, name);
if (dentry) {
if (d_need_lookup(dentry)) {
*need_lookup = true;
} else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
error = d_revalidate(dentry, nd);
if (unlikely(error <= 0)) {
if (error < 0) {
dput(dentry);
return ERR_PTR(error);
} else if (!d_invalidate(dentry)) {
dput(dentry);
dentry = NULL;
}
}
}
}
if (!dentry) {
dentry = d_alloc(dir, name);
if (unlikely(!dentry))
return ERR_PTR(-ENOMEM);
*need_lookup = true;
}
return dentry;
}
/*
* Call i_op->lookup on the dentry. The dentry must be negative but may be
 * hashed if it was populated with DCACHE_NEED_LOOKUP.
*
* dir->d_inode->i_mutex must be held
*/
static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct dentry *old;
/* Don't create child dentry for a dead directory. */
if (unlikely(IS_DEADDIR(dir))) {
dput(dentry);
return ERR_PTR(-ENOENT);
}
old = dir->i_op->lookup(dir, dentry, nd);
if (unlikely(old)) {
dput(dentry);
dentry = old;
}
return dentry;
}
static struct dentry *__lookup_hash(struct qstr *name,
struct dentry *base, struct nameidata *nd)
{
bool need_lookup;
struct dentry *dentry;
dentry = lookup_dcache(name, base, nd, &need_lookup);
if (!need_lookup)
return dentry;
return lookup_real(base->d_inode, dentry, nd);
}
/*
* It's more convoluted than I'd like it to be, but... it's still fairly
* small and for now I'd prefer to have fast path as straight as possible.
* It _is_ time-critical.
*/
static int do_lookup(struct nameidata *nd, struct qstr *name,
struct path *path, struct inode **inode)
{
struct vfsmount *mnt = nd->path.mnt;
struct dentry *dentry, *parent = nd->path.dentry;
int need_reval = 1;
int status = 1;
int err;
/*
* Rename seqlock is not required here because in the off chance
* of a false negative due to a concurrent rename, we're going to
* do the non-racy lookup, below.
*/
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
*inode = nd->inode;
dentry = __d_lookup_rcu(parent, name, &seq, inode);
if (!dentry)
goto unlazy;
/* Memory barrier in read_seqcount_begin of child is enough */
if (__read_seqcount_retry(&parent->d_seq, nd->seq))
return -ECHILD;
nd->seq = seq;
if (unlikely(d_need_lookup(dentry)))
goto unlazy;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
status = d_revalidate(dentry, nd);
if (unlikely(status <= 0)) {
if (status != -ECHILD)
need_reval = 0;
goto unlazy;
}
}
path->mnt = mnt;
path->dentry = dentry;
if (unlikely(!__follow_mount_rcu(nd, path, inode)))
goto unlazy;
if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
goto unlazy;
return 0;
unlazy:
if (unlazy_walk(nd, dentry))
return -ECHILD;
} else {
dentry = __d_lookup(parent, name);
}
if (unlikely(!dentry))
goto need_lookup;
if (unlikely(d_need_lookup(dentry))) {
dput(dentry);
goto need_lookup;
}
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
status = d_revalidate(dentry, nd);
if (unlikely(status <= 0)) {
if (status < 0) {
dput(dentry);
return status;
}
if (!d_invalidate(dentry)) {
dput(dentry);
goto need_lookup;
}
}
done:
path->mnt = mnt;
path->dentry = dentry;
err = follow_managed(path, nd->flags);
if (unlikely(err < 0)) {
path_put_conditional(path, nd);
return err;
}
if (err)
nd->flags |= LOOKUP_JUMPED;
*inode = path->dentry->d_inode;
return 0;
need_lookup:
BUG_ON(nd->inode != parent->d_inode);
mutex_lock(&parent->d_inode->i_mutex);
dentry = __lookup_hash(name, parent, nd);
mutex_unlock(&parent->d_inode->i_mutex);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
goto done;
}
static inline int may_lookup(struct nameidata *nd)
{
if (nd->flags & LOOKUP_RCU) {
int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
if (err != -ECHILD)
return err;
if (unlazy_walk(nd, NULL))
return -ECHILD;
}
return inode_permission(nd->inode, MAY_EXEC);
}
static inline int handle_dots(struct nameidata *nd, int type)
{
if (type == LAST_DOTDOT) {
if (nd->flags & LOOKUP_RCU) {
if (follow_dotdot_rcu(nd))
return -ECHILD;
} else
follow_dotdot(nd);
}
return 0;
}
static void terminate_walk(struct nameidata *nd)
{
if (!(nd->flags & LOOKUP_RCU)) {
path_put(&nd->path);
} else {
nd->flags &= ~LOOKUP_RCU;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
br_read_unlock(&vfsmount_lock);
}
}
/*
* Do we need to follow links? We _really_ want to be able
* to do this check without having to look at inode->i_op,
* so we keep a cache of "no, this doesn't need follow_link"
* for the common case.
*/
static inline int should_follow_link(struct inode *inode, int follow)
{
if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
if (likely(inode->i_op->follow_link))
return follow;
/* This gets set once for the inode lifetime */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_NOFOLLOW;
spin_unlock(&inode->i_lock);
}
return 0;
}
static inline int walk_component(struct nameidata *nd, struct path *path,
struct qstr *name, int type, int follow)
{
struct inode *inode;
int err;
/*
* "." and ".." are special - ".." especially so because it has
* to be able to know about the current root directory and
* parent relationships.
*/
if (unlikely(type != LAST_NORM))
return handle_dots(nd, type);
err = do_lookup(nd, name, path, &inode);
if (unlikely(err)) {
terminate_walk(nd);
return err;
}
if (!inode) {
path_to_nameidata(path, nd);
terminate_walk(nd);
return -ENOENT;
}
if (should_follow_link(inode, follow)) {
if (nd->flags & LOOKUP_RCU) {
if (unlikely(unlazy_walk(nd, path->dentry))) {
terminate_walk(nd);
return -ECHILD;
}
}
BUG_ON(inode != path->dentry->d_inode);
return 1;
}
path_to_nameidata(path, nd);
nd->inode = inode;
return 0;
}
/*
* This limits recursive symlink follows to 8, while
* limiting consecutive symlinks to 40.
*
* Without that kind of total limit, nasty chains of consecutive
* symlinks can cause almost arbitrarily long lookups.
*/
static inline int nested_symlink(struct path *path, struct nameidata *nd)
{
int res;
if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
path_put_conditional(path, nd);
path_put(&nd->path);
return -ELOOP;
}
BUG_ON(nd->depth >= MAX_NESTED_LINKS);
nd->depth++;
current->link_count++;
do {
struct path link = *path;
void *cookie;
res = follow_link(&link, nd, &cookie);
if (!res)
res = walk_component(nd, path, &nd->last,
nd->last_type, LOOKUP_FOLLOW);
put_link(nd, &link, cookie);
} while (res > 0);
current->link_count--;
nd->depth--;
return res;
}
/*
* We really don't want to look at inode->i_op->lookup
* when we don't have to. So we keep a cache bit in
* the inode ->i_opflags field that says "yes, we can
* do lookup on this inode".
*/
static inline int can_lookup(struct inode *inode)
{
if (likely(inode->i_opflags & IOP_LOOKUP))
return 1;
if (likely(!inode->i_op->lookup))
return 0;
/* We do this once for the lifetime of the inode */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_LOOKUP;
spin_unlock(&inode->i_lock);
return 1;
}
/*
* We can do the critical dentry name comparison and hashing
* operations one word at a time, but we are limited to:
*
* - Architectures with fast unaligned word accesses. We could
* do a "get_unaligned()" if this helps and is sufficiently
* fast.
*
* - Little-endian machines (so that we can generate the mask
* of low bytes efficiently). Again, we *could* do a byte
* swapping load on big-endian architectures if that is not
* expensive enough to make the optimization worthless.
*
* - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
* do not trap on the (extremely unlikely) case of a page
 *   crossing operation).
*
* - Furthermore, we need an efficient 64-bit compile for the
* 64-bit case in order to generate the "number of bytes in
* the final mask". Again, that could be replaced with a
* efficient population count instruction or similar.
*/
#ifdef CONFIG_DCACHE_WORD_ACCESS
#include <asm/word-at-a-time.h>
#ifdef CONFIG_64BIT
static inline unsigned int fold_hash(unsigned long hash)
{
return hash_64(hash, 32);
}
#else /* 32-bit case */
#define fold_hash(x) (x)
#endif
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
unsigned long a, mask;
unsigned long hash = 0;
for (;;) {
a = load_unaligned_zeropad(name);
if (len < sizeof(unsigned long))
break;
hash += a;
hash *= 9;
name += sizeof(unsigned long);
len -= sizeof(unsigned long);
if (!len)
goto done;
}
mask = ~(~0ul << len*8);
hash += mask & a;
done:
return fold_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
/*
* Calculate the length and hash of the path component, and
 * return the length of the component.
*/
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
unsigned long a, mask, hash, len;
hash = a = 0;
len = -sizeof(unsigned long);
do {
hash = (hash + a) * 9;
len += sizeof(unsigned long);
a = load_unaligned_zeropad(name+len);
/* Do we have any NUL or '/' bytes in this word? */
mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
} while (!mask);
/* The mask *below* the first high bit set */
mask = (mask - 1) & ~mask;
mask >>= 7;
hash += a & mask;
*hashp = fold_hash(hash);
return len + count_masked_bytes(mask);
}
#else
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
unsigned long hash = init_name_hash();
while (len--)
hash = partial_name_hash(*name++, hash);
return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
/*
* We know there's a real path component here of at least
* one character.
*/
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
unsigned long hash = init_name_hash();
unsigned long len = 0, c;
c = (unsigned char)*name;
do {
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
*hashp = end_name_hash(hash);
return len;
}
#endif
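#if 0
/*
 * Illustrative sketch only (never compiled): a stand-alone, user-space
 * demonstration of the little-endian mask arithmetic that hash_name() uses
 * above to keep only the bytes preceding the first '/' or NUL in a word.
 * repeat_byte() and word_has_zero() are local stand-ins for the
 * <asm/word-at-a-time.h> primitives, not kernel APIs.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t repeat_byte(uint64_t b) { return b * 0x0101010101010101ULL; }

/* high bit set in every byte of 'a' that is zero */
static uint64_t word_has_zero(uint64_t a)
{
	return (a - repeat_byte(0x01)) & ~a & repeat_byte(0x80);
}

int main(void)
{
	uint64_t a, mask;

	memcpy(&a, "etc/pass", 8);	/* little-endian load of 8 name bytes */
	mask = word_has_zero(a) | word_has_zero(a ^ repeat_byte('/'));
	mask = (mask - 1) & ~mask;	/* bits below the first marked byte */
	mask >>= 7;			/* 0xff for every byte before the '/' */
	printf("%llx\n", (unsigned long long)(a & mask));	/* "etc" remains */
	return 0;
}
#endif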
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
*
* Returns 0 and nd will have valid dentry and mnt on success.
* Returns error and drops reference to input namei data on failure.
*/
static int link_path_walk(const char *name, struct nameidata *nd)
{
struct path next;
int err;
while (*name=='/')
name++;
if (!*name)
return 0;
/* At this point we know we have a real path component. */
for(;;) {
struct qstr this;
long len;
int type;
err = may_lookup(nd);
if (err)
break;
len = hash_name(name, &this.hash);
this.name = name;
this.len = len;
type = LAST_NORM;
if (name[0] == '.') switch (len) {
case 2:
if (name[1] == '.') {
type = LAST_DOTDOT;
nd->flags |= LOOKUP_JUMPED;
}
break;
case 1:
type = LAST_DOT;
}
if (likely(type == LAST_NORM)) {
struct dentry *parent = nd->path.dentry;
nd->flags &= ~LOOKUP_JUMPED;
if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
err = parent->d_op->d_hash(parent, nd->inode,
&this);
if (err < 0)
break;
}
}
if (!name[len])
goto last_component;
/*
* If it wasn't NUL, we know it was '/'. Skip that
* slash, and continue until no more slashes.
*/
do {
len++;
} while (unlikely(name[len] == '/'));
if (!name[len])
goto last_component;
name += len;
err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
if (err < 0)
return err;
if (err) {
err = nested_symlink(&next, nd);
if (err)
return err;
}
if (can_lookup(nd->inode))
continue;
err = -ENOTDIR;
break;
/* here ends the main loop */
last_component:
nd->last = this;
nd->last_type = type;
return 0;
}
terminate_walk(nd);
return err;
}
static int path_init(int dfd, const char *name, unsigned int flags,
struct nameidata *nd, struct file **fp)
{
int retval = 0;
int fput_needed;
struct file *file;
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED;
nd->depth = 0;
if (flags & LOOKUP_ROOT) {
struct inode *inode = nd->root.dentry->d_inode;
if (*name) {
if (!inode->i_op->lookup)
return -ENOTDIR;
retval = inode_permission(inode, MAY_EXEC);
if (retval)
return retval;
}
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
br_read_lock(&vfsmount_lock);
rcu_read_lock();
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
path_get(&nd->path);
}
return 0;
}
nd->root.mnt = NULL;
if (*name=='/') {
if (flags & LOOKUP_RCU) {
br_read_lock(&vfsmount_lock);
rcu_read_lock();
set_root_rcu(nd);
} else {
set_root(nd);
path_get(&nd->root);
}
nd->path = nd->root;
} else if (dfd == AT_FDCWD) {
if (flags & LOOKUP_RCU) {
struct fs_struct *fs = current->fs;
unsigned seq;
br_read_lock(&vfsmount_lock);
rcu_read_lock();
do {
seq = read_seqcount_begin(&fs->seq);
nd->path = fs->pwd;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
} while (read_seqcount_retry(&fs->seq, seq));
} else {
get_fs_pwd(current->fs, &nd->path);
}
} else {
struct dentry *dentry;
file = fget_raw_light(dfd, &fput_needed);
retval = -EBADF;
if (!file)
goto out_fail;
dentry = file->f_path.dentry;
if (*name) {
retval = -ENOTDIR;
if (!S_ISDIR(dentry->d_inode->i_mode))
goto fput_fail;
retval = inode_permission(dentry->d_inode, MAY_EXEC);
if (retval)
goto fput_fail;
}
nd->path = file->f_path;
if (flags & LOOKUP_RCU) {
if (fput_needed)
*fp = file;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
br_read_lock(&vfsmount_lock);
rcu_read_lock();
} else {
path_get(&file->f_path);
fput_light(file, fput_needed);
}
}
nd->inode = nd->path.dentry->d_inode;
return 0;
fput_fail:
fput_light(file, fput_needed);
out_fail:
return retval;
}
static inline int lookup_last(struct nameidata *nd, struct path *path)
{
if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
nd->flags &= ~LOOKUP_PARENT;
return walk_component(nd, path, &nd->last, nd->last_type,
nd->flags & LOOKUP_FOLLOW);
}
/* Returns 0 and nd will be valid on success; returns error otherwise. */
static int path_lookupat(int dfd, const char *name,
unsigned int flags, struct nameidata *nd)
{
struct file *base = NULL;
struct path path;
int err;
/*
* Path walking is largely split up into 2 different synchronisation
* schemes, rcu-walk and ref-walk (explained in
* Documentation/filesystems/path-lookup.txt). These share much of the
* path walk code, but some things particularly setup, cleanup, and
* following mounts are sufficiently divergent that functions are
* duplicated. Typically there is a function foo(), and its RCU
* analogue, foo_rcu().
*
* -ECHILD is the error number of choice (just to avoid clashes) that
* is returned if some aspect of an rcu-walk fails. Such an error must
* be handled by restarting a traditional ref-walk (which will always
* be able to complete).
*/
err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
if (unlikely(err))
goto out;
current->total_link_count = 0;
err = link_path_walk(name, nd);
if (!err && !(flags & LOOKUP_PARENT)) {
err = lookup_last(nd, &path);
while (err > 0) {
void *cookie;
struct path link = path;
nd->flags |= LOOKUP_PARENT;
err = follow_link(&link, nd, &cookie);
if (!err)
err = lookup_last(nd, &path);
put_link(nd, &link, cookie);
}
}
if (!err)
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!nd->inode->i_op->lookup) {
path_put(&nd->path);
err = -ENOTDIR;
}
}
out:
if (base)
fput(base);
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
path_put(&nd->root);
nd->root.mnt = NULL;
}
return err;
}
static int do_path_lookup(int dfd, const char *name,
unsigned int flags, struct nameidata *nd)
{
int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
if (unlikely(retval == -ECHILD))
retval = path_lookupat(dfd, name, flags, nd);
if (unlikely(retval == -ESTALE))
retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
if (likely(!retval)) {
if (unlikely(!audit_dummy_context())) {
if (nd->path.dentry && nd->inode)
audit_inode(name, nd->path.dentry);
}
}
return retval;
}
/* does lookup, returns the object with parent locked */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
struct nameidata nd;
struct dentry *d;
int err = do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, &nd);
if (err)
return ERR_PTR(err);
if (nd.last_type != LAST_NORM) {
path_put(&nd.path);
return ERR_PTR(-EINVAL);
}
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
d = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
if (IS_ERR(d)) {
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
path_put(&nd.path);
return d;
}
*path = nd.path;
return d;
}
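#if 0
/*
 * Illustrative sketch only (never compiled): a caller of kern_path_locked()
 * gets the parent directory's i_mutex held and owns references to both the
 * returned dentry and *path, so it must undo all three itself.  The pathname
 * and function name here are hypothetical.
 */
static void example_kern_path_locked(void)
{
	struct path parent;
	struct dentry *d = kern_path_locked("/tmp/example", &parent);

	if (IS_ERR(d))
		return;		/* nothing is held on failure */
	/* ... create or remove something below parent.dentry here ... */
	dput(d);
	mutex_unlock(&parent.dentry->d_inode->i_mutex);
	path_put(&parent);
}
#endif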
int kern_path(const char *name, unsigned int flags, struct path *path)
{
struct nameidata nd;
int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
if (!res)
*path = nd.path;
return res;
}
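#if 0
/*
 * Illustrative sketch only (never compiled): typical in-kernel use of
 * kern_path().  On success the caller owns references to path.mnt and
 * path.dentry and drops them with path_put().  The pathname is made up.
 */
static int example_kern_path(void)
{
	struct path p;
	int err = kern_path("/etc/fstab", LOOKUP_FOLLOW, &p);

	if (err)
		return err;
	/* ... inspect p.dentry->d_inode ... */
	path_put(&p);
	return 0;
}
#endif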
/**
* vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
* @dentry: pointer to dentry of the base directory
* @mnt: pointer to vfs mount of the base directory
* @name: pointer to file name
* @flags: lookup flags
* @path: pointer to struct path to fill
*/
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
const char *name, unsigned int flags,
struct path *path)
{
struct nameidata nd;
int err;
nd.root.dentry = dentry;
nd.root.mnt = mnt;
BUG_ON(flags & LOOKUP_PARENT);
/* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
if (!err)
*path = nd.path;
return err;
}
/*
* Restricted form of lookup. Doesn't follow links, single-component only,
* needs parent already locked. Doesn't follow mounts.
* SMP-safe.
*/
static struct dentry *lookup_hash(struct nameidata *nd)
{
return __lookup_hash(&nd->last, nd->path.dentry, nd);
}
/**
* lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
* @len: maximum length @len should be interpreted to
*
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code. Also note that by using this function the
* nameidata argument is passed to the filesystem methods and a filesystem
* using this helper needs to be prepared for that.
*/
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
struct qstr this;
unsigned int c;
int err;
WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
this.name = name;
this.len = len;
this.hash = full_name_hash(name, len);
if (!len)
return ERR_PTR(-EACCES);
while (len--) {
c = *(const unsigned char *)name++;
if (c == '/' || c == '\0')
return ERR_PTR(-EACCES);
}
/*
* See if the low-level filesystem might want
* to use its own hash..
*/
if (base->d_flags & DCACHE_OP_HASH) {
int err = base->d_op->d_hash(base, base->d_inode, &this);
if (err < 0)
return ERR_PTR(err);
}
err = inode_permission(base->d_inode, MAY_EXEC);
if (err)
return ERR_PTR(err);
return __lookup_hash(&this, base, NULL);
}
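#if 0
/*
 * Illustrative sketch only (never compiled): how a filesystem typically uses
 * lookup_one_len() to find a single child of a directory it controls.  The
 * parent's i_mutex must already be held, as the WARN_ON_ONCE() above checks.
 * The child name is hypothetical.
 */
static struct dentry *example_lookup_child(struct dentry *dir)
{
	struct dentry *child;

	mutex_lock(&dir->d_inode->i_mutex);
	child = lookup_one_len("config", dir, strlen("config"));
	mutex_unlock(&dir->d_inode->i_mutex);
	/* ERR_PTR() on failure; on success the caller must dput() it */
	return child;
}
#endif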
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
struct path *path, int *empty)
{
struct nameidata nd;
char *tmp = getname_flags(name, flags, empty);
int err = PTR_ERR(tmp);
if (!IS_ERR(tmp)) {
BUG_ON(flags & LOOKUP_PARENT);
err = do_path_lookup(dfd, tmp, flags, &nd);
putname(tmp);
if (!err)
*path = nd.path;
}
return err;
}
int user_path_at(int dfd, const char __user *name, unsigned flags,
struct path *path)
{
return user_path_at_empty(dfd, name, flags, path, NULL);
}
static int user_path_parent(int dfd, const char __user *path,
struct nameidata *nd, char **name)
{
char *s = getname(path);
int error;
if (IS_ERR(s))
return PTR_ERR(s);
error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd);
if (error)
putname(s);
else
*name = s;
return error;
}
/*
* It's inline, so penalty for filesystems that don't use sticky bit is
* minimal.
*/
static inline int check_sticky(struct inode *dir, struct inode *inode)
{
uid_t fsuid = current_fsuid();
if (!(dir->i_mode & S_ISVTX))
return 0;
if (current_user_ns() != inode_userns(inode))
goto other_userns;
if (inode->i_uid == fsuid)
return 0;
if (dir->i_uid == fsuid)
return 0;
other_userns:
return !ns_capable(inode_userns(inode), CAP_FOWNER);
}
/*
* Check whether we can remove a link victim from directory dir, check
* whether the type of victim is right.
* 1. We can't do it if dir is read-only (done in permission())
* 2. We should have write and exec permissions on dir
* 3. We can't remove anything from append-only dir
* 4. We can't do anything with immutable dir (done in permission())
* 5. If the sticky bit on dir is set we should either
* a. be owner of dir, or
* b. be owner of victim, or
* c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
* links pointing to it.
* 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
* 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
* 9. We can't remove a root or mountpoint.
* 10. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
{
int error;
if (!victim->d_inode)
return -ENOENT;
BUG_ON(victim->d_parent->d_inode != dir);
audit_inode_child(victim, dir);
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
return -EPERM;
if (isdir) {
if (!S_ISDIR(victim->d_inode->i_mode))
return -ENOTDIR;
if (IS_ROOT(victim))
return -EBUSY;
} else if (S_ISDIR(victim->d_inode->i_mode))
return -EISDIR;
if (IS_DEADDIR(dir))
return -ENOENT;
if (victim->d_flags & DCACHE_NFSFS_RENAMED)
return -EBUSY;
return 0;
}
/* Check whether we can create an object with dentry child in directory
* dir.
* 1. We can't do it if child already exists (open has special treatment for
* this case, but since we are inlined it's OK)
* 2. We can't do it if dir is read-only (done in permission())
* 3. We should have write and exec permissions on dir
* 4. We can't do it if dir is immutable (done in permission())
*/
static inline int may_create(struct inode *dir, struct dentry *child)
{
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
* p1 and p2 should be directories on the same fs.
*/
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
struct dentry *p;
if (p1 == p2) {
mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
return NULL;
}
mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
p = d_ancestor(p2, p1);
if (p) {
mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
return p;
}
p = d_ancestor(p1, p2);
if (p) {
mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
return p;
}
mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
return NULL;
}
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
mutex_unlock(&p1->d_inode->i_mutex);
if (p1 != p2) {
mutex_unlock(&p2->d_inode->i_mutex);
mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
}
}
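#if 0
/*
 * Illustrative sketch only (never compiled): the canonical pairing of
 * lock_rename()/unlock_rename(), condensed from sys_renameat() further down.
 * The returned 'trap' is the common ancestor; if either looked-up dentry
 * equals it, the rename would create a loop and must be refused.
 */
static void example_rename_locking(struct dentry *old_dir, struct dentry *new_dir)
{
	struct dentry *trap = lock_rename(new_dir, old_dir);

	/* ... lookup_hash() the source and target, compare each against trap ... */
	unlock_rename(new_dir, old_dir);
	(void)trap;
}
#endif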
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
int error = may_create(dir, dentry);
if (error)
return error;
if (!dir->i_op->create)
return -EACCES; /* shouldn't it be ENOSYS? */
mode &= S_IALLUGO;
mode |= S_IFREG;
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, nd);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
static int may_open(struct path *path, int acc_mode, int flag)
{
struct dentry *dentry = path->dentry;
struct inode *inode = dentry->d_inode;
int error;
/* O_PATH? */
if (!acc_mode)
return 0;
if (!inode)
return -ENOENT;
switch (inode->i_mode & S_IFMT) {
case S_IFLNK:
return -ELOOP;
case S_IFDIR:
if (acc_mode & MAY_WRITE)
return -EISDIR;
break;
case S_IFBLK:
case S_IFCHR:
if (path->mnt->mnt_flags & MNT_NODEV)
return -EACCES;
/*FALLTHRU*/
case S_IFIFO:
case S_IFSOCK:
flag &= ~O_TRUNC;
break;
}
error = inode_permission(inode, acc_mode);
if (error)
return error;
/*
* An append-only file must be opened in append mode for writing.
*/
if (IS_APPEND(inode)) {
if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
return -EPERM;
if (flag & O_TRUNC)
return -EPERM;
}
/* O_NOATIME can only be set by the owner or superuser */
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
return 0;
}
static int handle_truncate(struct file *filp)
{
struct path *path = &filp->f_path;
struct inode *inode = path->dentry->d_inode;
int error = get_write_access(inode);
if (error)
return error;
/*
* Refuse to truncate files with mandatory locks held on them.
*/
error = locks_verify_locked(inode);
if (!error)
error = security_path_truncate(path);
if (!error) {
error = do_truncate(path->dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
filp);
}
put_write_access(inode);
return error;
}
static inline int open_to_namei_flags(int flag)
{
if ((flag & O_ACCMODE) == 3)
flag--;
return flag;
}
/*
* Handle the last step of open()
*/
static struct file *do_last(struct nameidata *nd, struct path *path,
const struct open_flags *op, const char *pathname)
{
struct dentry *dir = nd->path.dentry;
struct dentry *dentry;
int open_flag = op->open_flag;
int will_truncate = open_flag & O_TRUNC;
int want_write = 0;
int acc_mode = op->acc_mode;
struct file *filp;
int error;
nd->flags &= ~LOOKUP_PARENT;
nd->flags |= op->intent;
switch (nd->last_type) {
case LAST_DOTDOT:
case LAST_DOT:
error = handle_dots(nd, nd->last_type);
if (error)
return ERR_PTR(error);
/* fallthrough */
case LAST_ROOT:
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
audit_inode(pathname, nd->path.dentry);
if (open_flag & O_CREAT) {
error = -EISDIR;
goto exit;
}
goto ok;
case LAST_BIND:
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
audit_inode(pathname, dir);
goto ok;
}
if (!(open_flag & O_CREAT)) {
int symlink_ok = 0;
if (nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
symlink_ok = 1;
/* we _can_ be in RCU mode here */
error = walk_component(nd, path, &nd->last, LAST_NORM,
!symlink_ok);
if (error < 0)
return ERR_PTR(error);
if (error) /* symlink */
return NULL;
/* sayonara */
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
error = -ENOTDIR;
if (nd->flags & LOOKUP_DIRECTORY) {
if (!nd->inode->i_op->lookup)
goto exit;
}
audit_inode(pathname, nd->path.dentry);
goto ok;
}
/* create side of things */
/*
* This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been
* cleared when we got to the last component we are about to look up
*/
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
audit_inode(pathname, dir);
error = -EISDIR;
/* trailing slashes? */
if (nd->last.name[nd->last.len])
goto exit;
mutex_lock(&dir->d_inode->i_mutex);
dentry = lookup_hash(nd);
error = PTR_ERR(dentry);
if (IS_ERR(dentry)) {
mutex_unlock(&dir->d_inode->i_mutex);
goto exit;
}
path->dentry = dentry;
path->mnt = nd->path.mnt;
/* Negative dentry, just create the file */
if (!dentry->d_inode) {
umode_t mode = op->mode;
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
* This write is needed to ensure that a
* rw->ro transition does not occur between
* the time when the file is created and when
* a permanent write count is taken through
* the 'struct file' in nameidata_to_filp().
*/
error = mnt_want_write(nd->path.mnt);
if (error)
goto exit_mutex_unlock;
want_write = 1;
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
will_truncate = 0;
acc_mode = MAY_OPEN;
error = security_path_mknod(&nd->path, dentry, mode, 0);
if (error)
goto exit_mutex_unlock;
error = vfs_create(dir->d_inode, dentry, mode, nd);
if (error)
goto exit_mutex_unlock;
mutex_unlock(&dir->d_inode->i_mutex);
dput(nd->path.dentry);
nd->path.dentry = dentry;
goto common;
}
/*
* It already exists.
*/
mutex_unlock(&dir->d_inode->i_mutex);
audit_inode(pathname, path->dentry);
error = -EEXIST;
if (open_flag & O_EXCL)
goto exit_dput;
error = follow_managed(path, nd->flags);
if (error < 0)
goto exit_dput;
if (error)
nd->flags |= LOOKUP_JUMPED;
error = -ENOENT;
if (!path->dentry->d_inode)
goto exit_dput;
if (path->dentry->d_inode->i_op->follow_link)
return NULL;
path_to_nameidata(path, nd);
nd->inode = path->dentry->d_inode;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
error = -EISDIR;
if (S_ISDIR(nd->inode->i_mode))
goto exit;
ok:
if (!S_ISREG(nd->inode->i_mode))
will_truncate = 0;
if (will_truncate) {
error = mnt_want_write(nd->path.mnt);
if (error)
goto exit;
want_write = 1;
}
common:
error = may_open(&nd->path, acc_mode, open_flag);
if (error)
goto exit;
filp = nameidata_to_filp(nd);
if (!IS_ERR(filp)) {
error = ima_file_check(filp, op->acc_mode);
if (error) {
fput(filp);
filp = ERR_PTR(error);
}
}
if (!IS_ERR(filp)) {
if (will_truncate) {
error = handle_truncate(filp);
if (error) {
fput(filp);
filp = ERR_PTR(error);
}
}
}
out:
if (want_write)
mnt_drop_write(nd->path.mnt);
path_put(&nd->path);
return filp;
exit_mutex_unlock:
mutex_unlock(&dir->d_inode->i_mutex);
exit_dput:
path_put_conditional(path, nd);
exit:
filp = ERR_PTR(error);
goto out;
}
static struct file *path_openat(int dfd, const char *pathname,
struct nameidata *nd, const struct open_flags *op, int flags)
{
struct file *base = NULL;
struct file *filp;
struct path path;
int error;
filp = get_empty_filp();
if (!filp)
return ERR_PTR(-ENFILE);
filp->f_flags = op->open_flag;
nd->intent.open.file = filp;
nd->intent.open.flags = open_to_namei_flags(op->open_flag);
nd->intent.open.create_mode = op->mode;
error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
if (unlikely(error))
goto out_filp;
current->total_link_count = 0;
error = link_path_walk(pathname, nd);
if (unlikely(error))
goto out_filp;
filp = do_last(nd, &path, op, pathname);
while (unlikely(!filp)) { /* trailing symlink */
struct path link = path;
void *cookie;
if (!(nd->flags & LOOKUP_FOLLOW)) {
path_put_conditional(&path, nd);
path_put(&nd->path);
filp = ERR_PTR(-ELOOP);
break;
}
nd->flags |= LOOKUP_PARENT;
nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
filp = ERR_PTR(error);
else
filp = do_last(nd, &path, op, pathname);
put_link(nd, &link, cookie);
}
out:
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
path_put(&nd->root);
if (base)
fput(base);
release_open_intent(nd);
return filp;
out_filp:
filp = ERR_PTR(error);
goto out;
}
struct file *do_filp_open(int dfd, const char *pathname,
const struct open_flags *op, int flags)
{
struct nameidata nd;
struct file *filp;
filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
if (unlikely(filp == ERR_PTR(-ECHILD)))
filp = path_openat(dfd, pathname, &nd, op, flags);
if (unlikely(filp == ERR_PTR(-ESTALE)))
filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
return filp;
}
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
const char *name, const struct open_flags *op, int flags)
{
struct nameidata nd;
struct file *file;
nd.root.mnt = mnt;
nd.root.dentry = dentry;
flags |= LOOKUP_ROOT;
if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
return ERR_PTR(-ELOOP);
file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
if (unlikely(file == ERR_PTR(-ECHILD)))
file = path_openat(-1, name, &nd, op, flags);
if (unlikely(file == ERR_PTR(-ESTALE)))
file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
return file;
}
struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct nameidata nd;
int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd);
if (error)
return ERR_PTR(error);
/*
* Yucky last component or no last component at all?
* (foo/., foo/.., /////)
*/
if (nd.last_type != LAST_NORM)
goto out;
nd.flags &= ~LOOKUP_PARENT;
nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
nd.intent.open.flags = O_EXCL;
/*
* Do the final lookup.
*/
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
if (IS_ERR(dentry))
goto fail;
if (dentry->d_inode)
goto eexist;
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
* all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for a (non-existent) directory. -ENOENT for you.
*/
if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
dput(dentry);
dentry = ERR_PTR(-ENOENT);
goto fail;
}
*path = nd.path;
return dentry;
eexist:
dput(dentry);
dentry = ERR_PTR(-EEXIST);
fail:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
out:
path_put(&nd.path);
return dentry;
}
EXPORT_SYMBOL(kern_path_create);
struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir)
{
char *tmp = getname(pathname);
struct dentry *res;
if (IS_ERR(tmp))
return ERR_CAST(tmp);
res = kern_path_create(dfd, tmp, path, is_dir);
putname(tmp);
return res;
}
EXPORT_SYMBOL(user_path_create);
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
if (error)
return error;
if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
!ns_capable(inode_userns(dir), CAP_MKNOD))
return -EPERM;
if (!dir->i_op->mknod)
return -EPERM;
error = devcgroup_inode_mknod(mode, dev);
if (error)
return error;
error = security_inode_mknod(dir, dentry, mode, dev);
if (error)
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
static int may_mknod(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFREG:
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
case 0: /* zero mode translates to S_IFREG */
return 0;
case S_IFDIR:
return -EPERM;
default:
return -EINVAL;
}
}
SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
unsigned, dev)
{
struct dentry *dentry;
struct path path;
int error;
if (S_ISDIR(mode))
return -EPERM;
dentry = user_path_create(dfd, filename, &path, 0);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
error = may_mknod(mode);
if (error)
goto out_dput;
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out_drop_write;
switch (mode & S_IFMT) {
case 0: case S_IFREG:
error = vfs_create(path.dentry->d_inode,dentry,mode,NULL);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,
new_decode_dev(dev));
break;
case S_IFIFO: case S_IFSOCK:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
out_drop_write:
mnt_drop_write(path.mnt);
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
path_put(&path);
return error;
}
SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
return sys_mknodat(AT_FDCWD, filename, mode, dev);
}
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int error = may_create(dir, dentry);
unsigned max_links = dir->i_sb->s_max_links;
if (error)
return error;
if (!dir->i_op->mkdir)
return -EPERM;
mode &= (S_IRWXUGO|S_ISVTX);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
return error;
if (max_links && dir->i_nlink >= max_links)
return -EMLINK;
error = dir->i_op->mkdir(dir, dentry, mode);
if (!error)
fsnotify_mkdir(dir, dentry);
return error;
}
SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
struct dentry *dentry;
struct path path;
int error;
dentry = user_path_create(dfd, pathname, &path, 1);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
error = security_path_mkdir(&path, dentry, mode);
if (error)
goto out_drop_write;
error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
out_drop_write:
mnt_drop_write(path.mnt);
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
path_put(&path);
return error;
}
SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
return sys_mkdirat(AT_FDCWD, pathname, mode);
}
/*
* The dentry_unhash() helper will try to drop the dentry early: we
* should have a usage count of 1 if we're the only user of this
* dentry, and if that is true (possibly after pruning the dcache),
* then we drop the dentry now.
*
 * A low-level filesystem can, if it chooses, legally
* do a
*
* if (!d_unhashed(dentry))
* return -EBUSY;
*
* if it cannot handle the case of removing a directory
* that is still in use by something else..
*/
void dentry_unhash(struct dentry *dentry)
{
shrink_dcache_parent(dentry);
spin_lock(&dentry->d_lock);
if (dentry->d_count == 1)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
int error = may_delete(dir, dentry, 1);
if (error)
return error;
if (!dir->i_op->rmdir)
return -EPERM;
dget(dentry);
mutex_lock(&dentry->d_inode->i_mutex);
error = -EBUSY;
if (d_mountpoint(dentry))
goto out;
error = security_inode_rmdir(dir, dentry);
if (error)
goto out;
shrink_dcache_parent(dentry);
error = dir->i_op->rmdir(dir, dentry);
if (error)
goto out;
dentry->d_inode->i_flags |= S_DEAD;
dont_mount(dentry);
out:
mutex_unlock(&dentry->d_inode->i_mutex);
dput(dentry);
if (!error)
d_delete(dentry);
return error;
}
static long do_rmdir(int dfd, const char __user *pathname)
{
int error = 0;
char * name;
struct dentry *dentry;
struct nameidata nd;
error = user_path_parent(dfd, pathname, &nd, &name);
if (error)
return error;
switch(nd.last_type) {
case LAST_DOTDOT:
error = -ENOTEMPTY;
goto exit1;
case LAST_DOT:
error = -EINVAL;
goto exit1;
case LAST_ROOT:
error = -EBUSY;
goto exit1;
}
nd.flags &= ~LOOKUP_PARENT;
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit2;
if (!dentry->d_inode) {
error = -ENOENT;
goto exit3;
}
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit3;
error = security_path_rmdir(&nd.path, dentry);
if (error)
goto exit4;
error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
exit4:
mnt_drop_write(nd.path.mnt);
exit3:
dput(dentry);
exit2:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
exit1:
path_put(&nd.path);
putname(name);
return error;
}
SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
return do_rmdir(AT_FDCWD, pathname);
}
int vfs_unlink(struct inode *dir, struct dentry *dentry)
{
int error = may_delete(dir, dentry, 0);
if (error)
return error;
if (!dir->i_op->unlink)
return -EPERM;
mutex_lock(&dentry->d_inode->i_mutex);
if (d_mountpoint(dentry))
error = -EBUSY;
else {
error = security_inode_unlink(dir, dentry);
if (!error) {
error = dir->i_op->unlink(dir, dentry);
if (!error)
dont_mount(dentry);
}
}
mutex_unlock(&dentry->d_inode->i_mutex);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
fsnotify_link_count(dentry->d_inode);
d_delete(dentry);
}
return error;
}
/*
* Make sure that the actual truncation of the file will occur outside its
* directory's i_mutex. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
static long do_unlinkat(int dfd, const char __user *pathname)
{
int error;
char *name;
struct dentry *dentry;
struct nameidata nd;
struct inode *inode = NULL;
error = user_path_parent(dfd, pathname, &nd, &name);
if (error)
return error;
error = -EISDIR;
if (nd.last_type != LAST_NORM)
goto exit1;
nd.flags &= ~LOOKUP_PARENT;
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
if (nd.last.name[nd.last.len])
goto slashes;
inode = dentry->d_inode;
if (!inode)
goto slashes;
ihold(inode);
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit2;
error = security_path_unlink(&nd.path, dentry);
if (error)
goto exit3;
error = vfs_unlink(nd.path.dentry->d_inode, dentry);
exit3:
mnt_drop_write(nd.path.mnt);
exit2:
dput(dentry);
}
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
if (inode)
iput(inode); /* truncate the inode here */
exit1:
path_put(&nd.path);
putname(name);
return error;
slashes:
error = !dentry->d_inode ? -ENOENT :
S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
goto exit2;
}
SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
if ((flag & ~AT_REMOVEDIR) != 0)
return -EINVAL;
if (flag & AT_REMOVEDIR)
return do_rmdir(dfd, pathname);
return do_unlinkat(dfd, pathname);
}
SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
return do_unlinkat(AT_FDCWD, pathname);
}
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
int error = may_create(dir, dentry);
if (error)
return error;
if (!dir->i_op->symlink)
return -EPERM;
error = security_inode_symlink(dir, dentry, oldname);
if (error)
return error;
error = dir->i_op->symlink(dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
int error;
char *from;
struct dentry *dentry;
struct path path;
from = getname(oldname);
if (IS_ERR(from))
return PTR_ERR(from);
dentry = user_path_create(newdfd, newname, &path, 0);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_putname;
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
error = security_path_symlink(&path, dentry, from);
if (error)
goto out_drop_write;
error = vfs_symlink(path.dentry->d_inode, dentry, from);
out_drop_write:
mnt_drop_write(path.mnt);
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
path_put(&path);
out_putname:
putname(from);
return error;
}
SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
return sys_symlinkat(oldname, AT_FDCWD, newname);
}
int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
struct inode *inode = old_dentry->d_inode;
unsigned max_links = dir->i_sb->s_max_links;
int error;
if (!inode)
return -ENOENT;
error = may_create(dir, new_dentry);
if (error)
return error;
if (dir->i_sb != inode->i_sb)
return -EXDEV;
/*
* A link to an append-only or immutable file cannot be created.
*/
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
if (!dir->i_op->link)
return -EPERM;
if (S_ISDIR(inode->i_mode))
return -EPERM;
error = security_inode_link(old_dentry, dir, new_dentry);
if (error)
return error;
mutex_lock(&inode->i_mutex);
/* Make sure we don't allow creating hardlink to an unlinked file */
if (inode->i_nlink == 0)
error = -ENOENT;
else if (max_links && inode->i_nlink >= max_links)
error = -EMLINK;
else
error = dir->i_op->link(old_dentry, dir, new_dentry);
mutex_unlock(&inode->i_mutex);
if (!error)
fsnotify_link(dir, inode, new_dentry);
return error;
}
/*
* Hardlinks are often used in delicate situations. We avoid
* security-related surprises by not following symlinks on the
* newname. --KAB
*
* We don't follow them on the oldname either to be compatible
* with linux 2.0, and to avoid hard-linking to directories
* and other special files. --ADM
*/
SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname, int, flags)
{
struct dentry *new_dentry;
struct path old_path, new_path;
int how = 0;
int error;
if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
return -EINVAL;
/*
	 * To use null names we require CAP_DAC_READ_SEARCH.
	 * This ensures that not everyone will be able to create
	 * a hard link using the passed file descriptor.
*/
if (flags & AT_EMPTY_PATH) {
if (!capable(CAP_DAC_READ_SEARCH))
return -ENOENT;
how = LOOKUP_EMPTY;
}
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
error = user_path_at(olddfd, oldname, how, &old_path);
if (error)
return error;
new_dentry = user_path_create(newdfd, newname, &new_path, 0);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
error = mnt_want_write(new_path.mnt);
if (error)
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_drop_write;
error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
out_drop_write:
mnt_drop_write(new_path.mnt);
out_dput:
dput(new_dentry);
mutex_unlock(&new_path.dentry->d_inode->i_mutex);
path_put(&new_path);
out:
path_put(&old_path);
return error;
}
SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
{
return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}
/*
* The worst of all namespace operations - renaming directory. "Perverted"
* doesn't even start to describe it. Somebody in UCB had a heck of a trip...
* Problems:
* a) we can get into loop creation. Check is done in is_subdir().
* b) race potential - two innocent renames can create a loop together.
* That's where 4.4 screws up. Current fix: serialization on
* sb->s_vfs_rename_mutex. We might be more accurate, but that's another
* story.
* c) we have to lock _three_ objects - parents and victim (if it exists).
* And that - after we got ->i_mutex on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_mutex _and_ that parent of the object we
* move will be locked. Thus we can rank directories by the tree
* (ancestors first) and rank all non-directories after them.
* That works since everybody except rename does "lock parent, lookup,
* lock child" and rename is under ->s_vfs_rename_mutex.
* HOWEVER, it relies on the assumption that any object with ->lookup()
* has no more than 1 dentry. If "hybrid" objects will ever appear,
* we'd better make sure that there's no link(2) for them.
* d) conversion from fhandle to dentry may come in the wrong moment - when
* we are removing the target. Solution: we will have to grab ->i_mutex
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
* ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
int error = 0;
struct inode *target = new_dentry->d_inode;
unsigned max_links = new_dir->i_sb->s_max_links;
/*
* If we are going to change the parent - check write permissions,
* we'll need to flip '..'.
*/
if (new_dir != old_dir) {
error = inode_permission(old_dentry->d_inode, MAY_WRITE);
if (error)
return error;
}
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
if (error)
return error;
dget(new_dentry);
if (target)
mutex_lock(&target->i_mutex);
error = -EBUSY;
if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
goto out;
error = -EMLINK;
if (max_links && !target && new_dir != old_dir &&
new_dir->i_nlink >= max_links)
goto out;
if (target)
shrink_dcache_parent(new_dentry);
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
if (error)
goto out;
if (target) {
target->i_flags |= S_DEAD;
dont_mount(new_dentry);
}
out:
if (target)
mutex_unlock(&target->i_mutex);
dput(new_dentry);
if (!error)
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
d_move(old_dentry,new_dentry);
return error;
}
static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *target = new_dentry->d_inode;
int error;
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
if (error)
return error;
dget(new_dentry);
if (target)
mutex_lock(&target->i_mutex);
error = -EBUSY;
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
goto out;
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
if (error)
goto out;
if (target)
dont_mount(new_dentry);
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
d_move(old_dentry, new_dentry);
out:
if (target)
mutex_unlock(&target->i_mutex);
dput(new_dentry);
return error;
}
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
int error;
int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
const unsigned char *old_name;
if (old_dentry->d_inode == new_dentry->d_inode)
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
if (error)
return error;
if (!new_dentry->d_inode)
error = may_create(new_dir, new_dentry);
else
error = may_delete(new_dir, new_dentry, is_dir);
if (error)
return error;
if (!old_dir->i_op->rename)
return -EPERM;
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
if (is_dir)
error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
else
error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
if (!error)
fsnotify_move(old_dir, new_dir, old_name, is_dir,
new_dentry->d_inode, old_dentry);
fsnotify_oldname_free(old_name);
return error;
}
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
struct dentry *old_dir, *new_dir;
struct dentry *old_dentry, *new_dentry;
struct dentry *trap;
struct nameidata oldnd, newnd;
char *from;
char *to;
int error;
error = user_path_parent(olddfd, oldname, &oldnd, &from);
if (error)
goto exit;
error = user_path_parent(newdfd, newname, &newnd, &to);
if (error)
goto exit1;
error = -EXDEV;
if (oldnd.path.mnt != newnd.path.mnt)
goto exit2;
old_dir = oldnd.path.dentry;
error = -EBUSY;
if (oldnd.last_type != LAST_NORM)
goto exit2;
new_dir = newnd.path.dentry;
if (newnd.last_type != LAST_NORM)
goto exit2;
oldnd.flags &= ~LOOKUP_PARENT;
newnd.flags &= ~LOOKUP_PARENT;
newnd.flags |= LOOKUP_RENAME_TARGET;
trap = lock_rename(new_dir, old_dir);
old_dentry = lookup_hash(&oldnd);
error = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry))
goto exit3;
/* source must exist */
error = -ENOENT;
if (!old_dentry->d_inode)
goto exit4;
	/* unless the source is a directory, trailing slashes give -ENOTDIR */
if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
error = -ENOTDIR;
if (oldnd.last.name[oldnd.last.len])
goto exit4;
if (newnd.last.name[newnd.last.len])
goto exit4;
}
/* source should not be ancestor of target */
error = -EINVAL;
if (old_dentry == trap)
goto exit4;
new_dentry = lookup_hash(&newnd);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto exit4;
/* target should not be an ancestor of source */
error = -ENOTEMPTY;
if (new_dentry == trap)
goto exit5;
error = mnt_want_write(oldnd.path.mnt);
if (error)
goto exit5;
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry);
if (error)
goto exit6;
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry);
exit6:
mnt_drop_write(oldnd.path.mnt);
exit5:
dput(new_dentry);
exit4:
dput(old_dentry);
exit3:
unlock_rename(new_dir, old_dir);
exit2:
path_put(&newnd.path);
putname(to);
exit1:
path_put(&oldnd.path);
putname(from);
exit:
return error;
}
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
}
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
int len;
len = PTR_ERR(link);
if (IS_ERR(link))
goto out;
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
if (copy_to_user(buffer, link, len))
len = -EFAULT;
out:
return len;
}
/*
* A helper for ->readlink(). This should be used *ONLY* for symlinks that
* have ->follow_link() touching nd only in nd_set_link(). Using (or not
 * using) it for any given inode is up to the filesystem.
*/
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct nameidata nd;
void *cookie;
int res;
nd.depth = 0;
cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
if (IS_ERR(cookie))
return PTR_ERR(cookie);
res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
if (dentry->d_inode->i_op->put_link)
dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
return res;
}
int vfs_follow_link(struct nameidata *nd, const char *link)
{
return __vfs_follow_link(nd, link);
}
/* get the link contents into pagecache */
static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
char *kaddr;
struct page *page;
struct address_space *mapping = dentry->d_inode->i_mapping;
page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
return (char*)page;
*ppage = page;
kaddr = kmap(page);
nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
return kaddr;
}
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct page *page = NULL;
char *s = page_getlink(dentry, &page);
int res = vfs_readlink(dentry,buffer,buflen,s);
if (page) {
kunmap(page);
page_cache_release(page);
}
return res;
}
void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
nd_set_link(nd, page_getlink(dentry, &page));
return page;
}
void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
struct page *page = cookie;
if (page) {
kunmap(page);
page_cache_release(page);
}
}
/*
* The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
*/
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
int err;
char *kaddr;
unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
if (nofs)
flags |= AOP_FLAG_NOFS;
retry:
err = pagecache_write_begin(NULL, mapping, 0, len-1,
flags, &page, &fsdata);
if (err)
goto fail;
kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len-1);
kunmap_atomic(kaddr);
err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
page, fsdata);
if (err < 0)
goto fail;
if (err < len-1)
goto retry;
mark_inode_dirty(inode);
return 0;
fail:
return err;
}
int page_symlink(struct inode *inode, const char *symname, int len)
{
return __page_symlink(inode, symname, len,
!(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
}
const struct inode_operations page_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
};
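#if 0
/*
 * Illustrative sketch only (never compiled): a filesystem that keeps symlink
 * bodies in the page cache can point its symlink inodes at the operations
 * above and let page_symlink() write the target; ext2-like filesystems do
 * roughly this.  The helper name is hypothetical and error handling is
 * omitted.
 */
static void example_init_symlink(struct inode *inode, const char *target)
{
	inode->i_op = &page_symlink_inode_operations;
	page_symlink(inode, target, strlen(target) + 1);
}
#endif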
EXPORT_SYMBOL(user_path_at);
EXPORT_SYMBOL(follow_down_one);
EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(lock_rename);
EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link_light);
EXPORT_SYMBOL(page_put_link);
EXPORT_SYMBOL(page_readlink);
EXPORT_SYMBOL(__page_symlink);
EXPORT_SYMBOL(page_symlink);
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(kern_path);
EXPORT_SYMBOL(vfs_path_lookup);
EXPORT_SYMBOL(inode_permission);
EXPORT_SYMBOL(unlock_rename);
EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_follow_link);
EXPORT_SYMBOL(vfs_link);
EXPORT_SYMBOL(vfs_mkdir);
EXPORT_SYMBOL(vfs_mknod);
EXPORT_SYMBOL(generic_permission);
EXPORT_SYMBOL(vfs_readlink);
EXPORT_SYMBOL(vfs_rename);
EXPORT_SYMBOL(vfs_rmdir);
EXPORT_SYMBOL(vfs_symlink);
EXPORT_SYMBOL(vfs_unlink);
EXPORT_SYMBOL(dentry_unhash);
EXPORT_SYMBOL(generic_readlink);
| Pulshen/XKernel | fs/namei.c | C | gpl-2.0 | 87,242 |
/* a10 513
* Copyright (c) 2001-2012 Nicolas Léveillé <knos.free.fr>
*
* You should have received this file ('src/lib/cokus.c') with a license
* agreement. ('LICENSE' file)
*
* Copying, using, modifying and distributing this file are rights
* covered under this licensing agreement and are conditioned by its
* full acceptance and understanding.
* e 513 */
// This is the ``Mersenne Twister'' random number generator MT19937, which
// generates pseudorandom integers uniformly distributed in 0..(2^32 - 1)
// starting from any odd seed in 0..(2^32 - 1). This version is a recode
// by Shawn Cokus (Cokus@math.washington.edu) on March 8, 1998 of a version by
// Takuji Nishimura (who had suggestions from Topher Cooper and Marc Rieffel in
// July-August 1997).
//
// Effectiveness of the recoding (on Goedel2.math.washington.edu, a DEC Alpha
// running OSF/1) using GCC -O3 as a compiler: before recoding: 51.6 sec. to
// generate 300 million random numbers; after recoding: 24.0 sec. for the same
// (i.e., 46.5% of original time), so speed is now about 12.5 million random
// number generations per second on this machine.
//
// According to the URL <http://www.math.keio.ac.jp/~matumoto/emt.html>
// (and paraphrasing a bit in places), the Mersenne Twister is ``designed
// with consideration of the flaws of various existing generators,'' has
// a period of 2^19937 - 1, gives a sequence that is 623-dimensionally
// equidistributed, and ``has passed many stringent tests, including the
// die-hard test of G. Marsaglia and the load test of P. Hellekalek and
// S. Wegenkittl.'' It is efficient in memory usage (typically using 2506
// to 5012 bytes of static data, depending on data type sizes, and the code
// is quite short as well). It generates random numbers in batches of 624
// at a time, so the caching and pipelining of modern systems is exploited.
// It is also divide- and mod-free.
//
// This library is free software; you can redistribute it and/or modify it
// under the terms of the GNU Library General Public License as published by
// the Free Software Foundation (either version 2 of the License or, at your
// option, any later version). This library is distributed in the hope that
// it will be useful, but WITHOUT ANY WARRANTY, without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
// the GNU Library General Public License for more details. You should have
// received a copy of the GNU Library General Public License along with this
// library; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307, USA.
//
// The code as Shawn received it included the following notice:
//
// Copyright (C) 1997 Makoto Matsumoto and Takuji Nishimura. When
// you use this, send an e-mail to <matumoto@math.keio.ac.jp> with
// an appropriate reference to your work.
//
// It would be nice to CC: <Cokus@math.washington.edu> when you write.
//
#include <libc/stdio.h>
#include <libc/stdlib.h>
#include <libc/stdint.h>
//
// uint32_t must be an unsigned integer type capable of holding at least 32
// bits; exactly 32 should be fastest, but 64 is better on an Alpha with
// GCC at -O3 optimization so try your options and see what's best for you
//
#define N (624) // length of state vector
#define M (397) // a period parameter
#define K (0x9908B0DFU) // a magic constant
#define hiBit(u) ((u)&0x80000000U) // mask all but highest bit of u
#define loBit(u) ((u)&0x00000001U) // mask all but lowest bit of u
#define loBits(u) ((u)&0x7FFFFFFFU) // mask the highest bit of u
#define mixBits(u, v) (hiBit(u) | loBits(v)) // move hi bit of u to hi bit of v
static uint32_t state[N + 1]; // state vector + 1 extra to not violate ANSI C
static uint32_t *next; // next random value is computed from here
static int left = -1; // can *next++ this many times before reloading
extern void seedMT(uint32_t seed)
{
//
// We initialize state[0..(N-1)] via the generator
//
// x_new = (69069 * x_old) mod 2^32
//
// from Line 15 of Table 1, p. 106, Sec. 3.3.4 of Knuth's
// _The Art of Computer Programming_, Volume 2, 3rd ed.
//
// Notes (SJC): I do not know what the initial state requirements
// of the Mersenne Twister are, but it seems this seeding generator
// could be better. It achieves the maximum period for its modulus
// (2^30) iff x_initial is odd (p. 20-21, Sec. 3.2.1.2, Knuth); if
// x_initial can be even, you have sequences like 0, 0, 0, ...;
// 2^31, 2^31, 2^31, ...; 2^30, 2^30, 2^30, ...; 2^29, 2^29 + 2^31,
// 2^29, 2^29 + 2^31, ..., etc. so I force seed to be odd below.
//
// Even if x_initial is odd, if x_initial is 1 mod 4 then
//
// the lowest bit of x is always 1,
// the next-to-lowest bit of x is always 0,
// the 2nd-from-lowest bit of x alternates ... 0 1 0 1 0 1 0 1 ... ,
// the 3rd-from-lowest bit of x 4-cycles ... 0 1 1 0 0 1 1 0 ... ,
// the 4th-from-lowest bit of x has the 8-cycle ... 0 0 0 1 1 1 1 0 ... ,
// ...
//
// and if x_initial is 3 mod 4 then
//
// the lowest bit of x is always 1,
// the next-to-lowest bit of x is always 1,
// the 2nd-from-lowest bit of x alternates ... 0 1 0 1 0 1 0 1 ... ,
// the 3rd-from-lowest bit of x 4-cycles ... 0 0 1 1 0 0 1 1 ... ,
// the 4th-from-lowest bit of x has the 8-cycle ... 0 0 1 1 1 1 0 0 ... ,
// ...
//
// The generator's potency (min. s>=0 with (69069-1)^s = 0 mod 2^32) is
// 16, which seems to be alright by p. 25, Sec. 3.2.1.3 of Knuth. It
// also does well in the dimension 2..5 spectral tests, but it could be
// better in dimension 6 (Line 15, Table 1, p. 106, Sec. 3.3.4, Knuth).
//
// Note that the random number user does not see the values generated
// here directly since reloadMT() will always munge them first, so maybe
// none of all of this matters. In fact, the seed values made here could
// even be extra-special desirable if the Mersenne Twister theory says
// so-- that's why the only change I made is to restrict to odd seeds.
//
register uint32_t x = (seed | 1U) & 0xFFFFFFFFU, *s = state;
register int j;
for (left = 0, *s++ = x, j = N; --j; *s++ = (x *= 69069U) & 0xFFFFFFFFU)
;
}
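/*
 * Note added for clarity: because seedMT() computes (seed | 1U), the seed
 * is always forced odd before use. For example, seedMT(4356U) and
 * seedMT(4357U) initialise exactly the same state and therefore yield the
 * same stream from randomMT().
 */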
extern uint32_t reloadMT(void)
{
register uint32_t *p0 = state, *p2 = state + 2, *pM = state + M, s0, s1;
register int j;
if (left < -1)
seedMT(4357U);
left = N - 1, next = state + 1;
for (s0 = state[0], s1 = state[1], j = N - M + 1; --j; s0 = s1, s1 = *p2++)
*p0++ = *pM++ ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
for (pM = state, j = M; --j; s0 = s1, s1 = *p2++)
*p0++ = *pM++ ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
s1 = state[0], *p0 = *pM ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
s1 ^= (s1 >> 11);
s1 ^= (s1 << 7) & 0x9D2C5680U;
s1 ^= (s1 << 15) & 0xEFC60000U;
return (s1 ^ (s1 >> 18));
}
extern uint32_t randomMT(void)
{
uint32_t y;
if (--left < 0)
return (reloadMT());
y = *next++;
y ^= (y >> 11);
y ^= (y << 7) & 0x9D2C5680U;
y ^= (y << 15) & 0xEFC60000U;
return (y ^ (y >> 18));
}
#if 0
int main(void)
{
int j;
// you can seed with any uint32_t, but the best are odds in 0..(2^32 - 1)
seedMT(4357U);
// print the first 2,002 random numbers seven to a line as an example
for(j=0; j<2002; j++)
printf(" %10lu%s", (unsigned long) randomMT(), (j%7)==6 ? "\n" : "");
return(EXIT_SUCCESS);
}
#endif
| uucidl/plus-one-minus-one | src/lib/cokus.c | C | gpl-2.0 | 7,813 |
/*
* Copyright (C) ST-Ericsson SA 2010,2011
*
* Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
*
* License terms: GNU General Public License (GPL) version 2
*
* U9500 <-> M6718 IPC protocol implementation using SPI.
* state machine definition and functionality.
*/
#include <linux/modem/m6718_spi/modem_driver.h>
#include "modem_statemachine.h"
#include "modem_util.h"
#include "modem_netlink.h"
#include "modem_debug.h"
#include "modem_queue.h"
#include "modem_protocol.h"
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
#include "modem_state.h"
#endif
#define CMD_BOOTREQ (1)
#define CMD_BOOTRESP (2)
#define CMD_WRITE (3)
#define CMD_READ (4)
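/*
 * These are the L1 header command types exchanged over SPI. For
 * illustration (no new functionality): a write transaction builds its
 * header as ipc_util_make_l1_header(CMD_WRITE, frame->counter, frame->len)
 * and a read poll uses ipc_util_make_l1_header(CMD_READ, 0, 0), as done in
 * the state enter functions below.
 */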
static u8 sm_init_enter(u8 event, struct ipc_link_context *context)
{
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
/* if the modem is off, un-configure the IPC GPIO pins for low power */
if (modem_state_get_state() == MODEM_STATE_OFF) {
dev_info(&context->sdev->dev,
"link %d: modem is off, un-configuring GPIO\n",
context->link->id);
ipc_util_link_gpio_unconfig(context);
}
#endif
/* nothing more to do until an event happens */
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_init_exit(u8 event,
struct ipc_link_context *context)
{
bool int_active = false;
/*
* For reset event just re-enter init in case the modem has
* powered off - we need to reconfigure our GPIO pins
*/
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_INIT);
/* re-sample link INT pin */
int_active = ipc_util_int_is_active(context);
atomic_set(&context->state_int, int_active);
dev_info(&context->sdev->dev,
"link %d: link initialised; SS:INACTIVE(%d) INT:%s(%d)\n",
context->link->id,
ipc_util_ss_level_inactive(context),
int_active ? "ACTIVE" : "INACTIVE",
int_active ? ipc_util_int_level_active(context) :
ipc_util_int_level_inactive(context));
/* handshake is only on link 0 */
if (context->link->id == 0) {
if (!int_active) {
dev_info(&context->sdev->dev,
"link %d: slave INT signal is inactive\n",
context->link->id);
/* start boot handshake */
return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
} else {
/* wait for slave INT signal to stabilise inactive */
return ipc_sm_state(IPC_SM_WAIT_SLAVE_STABLE);
}
} else {
dev_info(&context->sdev->dev,
"link %d: boot sync not needed, going idle\n",
context->link->id);
return ipc_sm_state(IPC_SM_IDL);
}
}
static const struct ipc_sm_state *sm_init_aud_exit(u8 event,
struct ipc_link_context *context)
{
bool int_active = false;
/*
* For reset event just re-enter init in case the modem has
* powered off - we need to reconfigure our GPIO pins
*/
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_INIT_AUD);
/* re-sample link INT pin */
int_active = ipc_util_int_is_active(context);
atomic_set(&context->state_int, int_active);
dev_info(&context->sdev->dev,
"link %d: link initialised; SS:INACTIVE(%d) INT:%s(%d)\n",
context->link->id,
ipc_util_ss_level_inactive(context),
int_active ? "ACTIVE" : "INACTIVE",
int_active ? ipc_util_int_level_active(context) :
ipc_util_int_level_inactive(context));
dev_info(&context->sdev->dev,
"link %d: boot sync not needed, going idle\n",
context->link->id);
return ipc_sm_state(IPC_SM_IDL_AUD);
}
static u8 sm_wait_slave_stable_enter(u8 event, struct ipc_link_context *context)
{
static unsigned long printk_warn_time;
if (printk_timed_ratelimit(&printk_warn_time, 60 * 1000))
dev_info(&context->sdev->dev,
"link %d: waiting for stable inactive slave INT\n",
context->link->id);
ipc_util_start_slave_stable_timer(context);
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_wait_slave_stable_exit(u8 event,
struct ipc_link_context *context)
{
if (!ipc_util_int_is_active(context)) {
dev_info(&context->sdev->dev,
"link %d: slave INT signal is stable inactive\n",
context->link->id);
return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
} else {
return ipc_sm_state(IPC_SM_WAIT_SLAVE_STABLE);
}
}
static u8 sm_wait_handshake_inactive_enter(u8 event,
struct ipc_link_context *context)
{
dev_info(&context->sdev->dev,
"link %d: waiting for stable inactive slave INT\n",
context->link->id);
ipc_util_start_slave_stable_timer(context);
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_wait_handshake_inactive_exit(u8 event,
struct ipc_link_context *context)
{
int i;
if (!ipc_util_int_is_active(context)) {
dev_info(&context->sdev->dev,
"link %d: slave INT signal is inactive, going idle\n",
context->link->id);
/* modem sync is done */
atomic_inc(&l1_context.boot_sync_done);
ipc_broadcast_modem_online(context);
/*
* Kick the state machine for any initialised links - skip link0
* since this link has just completed handshake
*/
for (i = 1; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
if (l1_context.device_context[i].state != NULL) {
dev_dbg(&context->sdev->dev,
"link %d has already been probed, "
"kicking state machine\n", i);
ipc_sm_kick(IPC_SM_RUN_INIT,
&l1_context.device_context[i]);
}
return ipc_sm_state(IPC_SM_IDL);
} else {
return ipc_sm_state(IPC_SM_WAIT_HANDSHAKE_INACTIVE);
}
}
static u8 sm_idl_enter(u8 event, struct ipc_link_context *context)
{
ipc_util_deactivate_ss(context);
ipc_dbg_enter_idle(context);
/* check if tx queue contains items */
if (atomic_read(&context->tx_q_count) > 0) {
dev_dbg(&context->sdev->dev,
"link %d: tx queue contains items\n",
context->link->id);
return IPC_SM_RUN_TX_REQ;
}
/* check if modem has already requested transaction start */
if (atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
return IPC_SM_RUN_SLAVE_IRQ;
}
dev_dbg(&context->sdev->dev,
"link %d: going idle\n", context->link->id);
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_idl_exit(u8 event,
struct ipc_link_context *context)
{
ipc_dbg_exit_idle(context);
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
else if (event == IPC_SM_RUN_TX_REQ)
return ipc_sm_state(IPC_SM_SLW_TX_WR_CMD);
else if (event == IPC_SM_RUN_SLAVE_IRQ)
return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
else
return ipc_sm_state(IPC_SM_HALT);
}
static const struct ipc_sm_state *sm_idl_aud_exit(u8 event,
struct ipc_link_context *context)
{
ipc_dbg_exit_idle(context);
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET_AUD);
/* always transmit data first */
return ipc_sm_state(IPC_SM_SLW_TX_WR_DAT_AUD);
}
static u8 sm_slw_tx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
{
struct ipc_tx_queue *frame;
/* get the frame from the head of the tx queue */
if (ipc_queue_is_empty(context)) {
dev_err(&context->sdev->dev,
"link %d error: tx queue is empty!\n",
context->link->id);
return IPC_SM_RUN_ABORT;
}
frame = ipc_queue_get_frame(context);
ipc_dbg_dump_frame(&context->sdev->dev, context->link->id, frame, true);
context->cmd = ipc_util_make_l1_header(CMD_WRITE, frame->counter,
frame->len);
dev_dbg(&context->sdev->dev,
"link %d: TX FRAME cmd %08x (type %d counter %d len %d)\n",
context->link->id,
context->cmd,
ipc_util_get_l1_cmd(context->cmd),
ipc_util_get_l1_counter(context->cmd),
ipc_util_get_l1_length(context->cmd));
ipc_util_spi_message_prepare(context, &context->cmd,
NULL, IPC_L1_HDR_SIZE);
context->frame = frame;
/* slave might already have signalled ready to transmit */
if (atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
ipc_util_activate_ss_with_tmo(context);
return IPC_SM_RUN_NONE;
}
}
static const struct ipc_sm_state *sm_slw_tx_wr_cmd_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
else if (event == IPC_SM_RUN_COMMS_TMO)
return ipc_sm_state(IPC_SM_HALT);
else
return ipc_sm_state(IPC_SM_ACT_TX_WR_CMD);
}
static u8 sm_act_tx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
{
int err;
/* slave is ready - start the spi transfer */
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_act_tx_wr_cmd_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
else
return ipc_sm_state(IPC_SM_SLW_TX_WR_DAT);
}
static u8 sm_slw_tx_wr_dat_enter(u8 event, struct ipc_link_context *context)
{
/* prepare to transfer the frame tx data */
ipc_util_spi_message_prepare(context, context->frame->data,
NULL, context->frame->len);
/* slave might already have signalled ready to transmit */
if (atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
ipc_util_activate_ss_with_tmo(context);
return IPC_SM_RUN_NONE;
}
}
static u8 sm_slw_tx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
{
struct ipc_tx_queue *frame = NULL;
/* check if there is a frame to be sent */
if (!ipc_queue_is_empty(context)) {
frame = ipc_queue_get_frame(context);
} else {
/* no frame to send, create an empty one */
dev_dbg(&context->sdev->dev,
"link %d: no frame to send, allocating dummy\n",
context->link->id);
frame = ipc_queue_new_frame(context, 0);
if (frame == NULL)
return IPC_SM_RUN_ABORT;
}
ipc_dbg_dump_frame(&context->sdev->dev, context->link->id, frame, true);
/* prepare to transfer the frame tx data */
context->frame = frame;
ipc_util_spi_message_prepare(context, context->frame->data,
NULL, context->frame->len);
/* slave might already have signalled ready to transmit */
if (event == IPC_SM_RUN_SLAVE_IRQ || atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
ipc_util_activate_ss_with_tmo(context);
return IPC_SM_RUN_NONE;
}
}
static const struct ipc_sm_state *sm_slw_tx_wr_dat_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
else if (event == IPC_SM_RUN_COMMS_TMO)
return ipc_sm_state(IPC_SM_HALT);
else
return ipc_sm_state(IPC_SM_ACT_TX_WR_DAT);
}
static const struct ipc_sm_state *sm_slw_tx_wr_dat_aud_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET_AUD);
else if (event == IPC_SM_RUN_COMMS_TMO)
return ipc_sm_state(IPC_SM_HALT_AUD);
else
return ipc_sm_state(IPC_SM_ACT_TX_WR_DAT_AUD);
}
static u8 sm_act_tx_wr_dat_enter(u8 event, struct ipc_link_context *context)
{
int err;
/* slave is ready - start the spi transfer */
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_act_tx_wr_dat_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
/* frame is sent, increment link tx counter */
context->tx_bytes += context->frame->actual_len;
#endif
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
{
u8 channel;
channel = ipc_util_get_l2_channel(*(u32 *)context->frame->data);
if (ipc_util_channel_is_loopback(channel)) {
context->last_frame = context->frame;
} else {
ipc_queue_delete_frame(context->frame);
context->frame = NULL;
}
}
#else
/* free the sent frame */
ipc_queue_delete_frame(context->frame);
context->frame = NULL;
#endif
return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
}
static const struct ipc_sm_state *sm_act_tx_wr_dat_aud_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET_AUD);
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
/* frame is sent, increment link tx counter */
context->tx_bytes += context->frame->actual_len;
#endif
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
{
u8 channel;
channel = ipc_util_get_l2_channel(*(u32 *)context->frame->data);
if (ipc_util_channel_is_loopback(channel)) {
/* create a copy of the frame */
context->last_frame = ipc_queue_new_frame(context,
context->frame->actual_len);
memcpy(context->last_frame->data,
context->frame->data,
context->frame->actual_len);
}
}
#endif
return ipc_sm_state(IPC_SM_SLW_RX_WR_DAT_AUD);
}
static u8 sm_slw_tx_rd_cmd_enter(u8 event, struct ipc_link_context *context)
{
context->cmd = ipc_util_make_l1_header(CMD_READ, 0, 0);
dev_dbg(&context->sdev->dev,
"link %d: cmd %08x (type %d)\n",
context->link->id,
context->cmd,
ipc_util_get_l1_cmd(context->cmd));
/* prepare the spi message to transfer */
ipc_util_spi_message_prepare(context, &context->cmd,
NULL, IPC_L1_HDR_SIZE);
/* check if the slave requested this transaction */
if (event == IPC_SM_RUN_SLAVE_IRQ) {
dev_dbg(&context->sdev->dev,
"link %d: slave initiated transaction, continue\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
/* slave might already have signalled ready to transmit */
if (atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
ipc_util_activate_ss_with_tmo(context);
return IPC_SM_RUN_NONE;
}
}
}
static const struct ipc_sm_state *sm_slw_tx_rd_cmd_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
else if (event == IPC_SM_RUN_COMMS_TMO)
return ipc_sm_state(IPC_SM_HALT);
else
return ipc_sm_state(IPC_SM_ACT_TX_RD_CMD);
}
static u8 sm_act_tx_rd_cmd_enter(u8 event, struct ipc_link_context *context)
{
int err;
/* slave is ready - start the spi transfer */
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_act_tx_rd_cmd_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
else
return ipc_sm_state(IPC_SM_SLW_RX_WR_CMD);
}
static u8 sm_slw_rx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
{
/* prepare to receive MESSAGE WRITE frame header */
ipc_util_spi_message_prepare(context, NULL,
&context->cmd, IPC_L1_HDR_SIZE);
/* slave might already have signalled ready to transmit */
if (atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
ipc_util_activate_ss_with_tmo(context);
return IPC_SM_RUN_NONE;
}
}
static const struct ipc_sm_state *sm_slw_rx_wr_cmd_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
else if (event == IPC_SM_RUN_COMMS_TMO)
return ipc_sm_state(IPC_SM_HALT);
else
return ipc_sm_state(IPC_SM_ACT_RX_WR_CMD);
}
static u8 sm_act_rx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
{
int err;
/* slave is ready - start the spi transfer */
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_act_rx_wr_cmd_exit(u8 event,
struct ipc_link_context *context)
{
u8 cmd_type = ipc_util_get_l1_cmd(context->cmd);
int counter = ipc_util_get_l1_counter(context->cmd);
int length = ipc_util_get_l1_length(context->cmd);
dev_dbg(&context->sdev->dev,
"link %d: RX HEADER %08x (type %d counter %d length %d)\n",
context->link->id,
context->cmd,
cmd_type,
counter,
length);
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
if (cmd_type == CMD_WRITE) {
/* slave has data to send - allocate a frame to hold it */
context->frame = ipc_queue_new_frame(context, length);
if (context->frame == NULL)
return ipc_sm_state(IPC_SM_IDL);
context->frame->counter = counter;
ipc_util_spi_message_prepare(context, NULL,
context->frame->data, context->frame->len);
return ipc_sm_state(IPC_SM_ACT_RX_WR_DAT);
} else {
if (cmd_type != 0)
dev_err(&context->sdev->dev,
"link %d error: received invalid frame type %x "
"(%08x)! assuming TRANSACTION_END...\n",
context->link->id,
cmd_type,
context->cmd);
/* slave has no data to send */
dev_dbg(&context->sdev->dev,
"link %d: slave has no data to send\n",
context->link->id);
return ipc_sm_state(IPC_SM_IDL);
}
}
static u8 sm_slw_rx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
{
/*
* We're using the same frame buffer we just sent, so no need for a
* new allocation here, just prepare the spi message
*/
ipc_util_spi_message_prepare(context, NULL,
context->frame->data, context->frame->len);
/* slave might already have signalled ready to transmit */
if (atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
ipc_util_activate_ss_with_tmo(context);
return IPC_SM_RUN_NONE;
}
}
static const struct ipc_sm_state *sm_slw_rx_wr_dat_aud_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET_AUD);
else if (event == IPC_SM_RUN_COMMS_TMO)
return ipc_sm_state(IPC_SM_HALT_AUD);
else
return ipc_sm_state(IPC_SM_ACT_RX_WR_DAT_AUD);
}
static u8 sm_act_rx_wr_dat_enter(u8 event, struct ipc_link_context *context)
{
int err;
/* assume slave is still ready - prepare and start the spi transfer */
ipc_util_spi_message_prepare(context, NULL,
context->frame->data, context->frame->len);
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static u8 sm_act_rx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
{
int err;
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_act_rx_wr_dat_exit(u8 event,
struct ipc_link_context *context)
{
u32 frame_hdr;
unsigned char l2_header;
unsigned int l2_length;
u8 *l2_data;
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET);
dev_dbg(&context->sdev->dev,
"link %d: RX PAYLOAD %d bytes\n",
context->link->id, context->frame->len);
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
/* frame is received, increment link rx counter */
context->rx_bytes += context->frame->len;
#endif
/* decode L2 header */
frame_hdr = *(u32 *)context->frame->data;
l2_header = ipc_util_get_l2_channel(frame_hdr);
l2_length = ipc_util_get_l2_length(frame_hdr);
l2_data = (u8 *)context->frame->data + IPC_L2_HDR_SIZE;
context->frame->actual_len = l2_length + IPC_L2_HDR_SIZE;
ipc_dbg_dump_frame(&context->sdev->dev, context->link->id,
context->frame, false);
if (l2_length > (context->frame->len - 4)) {
dev_err(&context->sdev->dev,
"link %d: suspicious frame: L1 len %d L2 len %d\n",
context->link->id, context->frame->len, l2_length);
}
dev_dbg(&context->sdev->dev,
"link %d: L2 PDU decode: header 0x%08x channel %d length %d "
"data[%02x%02x%02x...]\n",
context->link->id, frame_hdr, l2_header, l2_length,
l2_data[0], l2_data[1], l2_data[2]);
if (ipc_util_channel_is_loopback(l2_header))
ipc_dbg_verify_rx_frame(context);
/* pass received frame up to L2mux layer */
if (!modem_protocol_channel_is_open(l2_header)) {
dev_err(&context->sdev->dev,
"link %d error: received frame on invalid channel %d, "
"frame discarded\n",
context->link->id, l2_header);
} else {
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
/*
* Discard loopback frames if we are taking throughput
* measurements - we'll be loading the links and so will likely
* overload the buffers.
*/
if (!ipc_util_channel_is_loopback(l2_header))
#endif
modem_m6718_spi_receive(context->sdev,
l2_header, l2_length, l2_data);
}
/* data is copied by L2mux so free the frame here */
ipc_queue_delete_frame(context->frame);
context->frame = NULL;
/* check tx queue for content */
if (!ipc_queue_is_empty(context)) {
dev_dbg(&context->sdev->dev,
"link %d: tx queue not empty\n", context->link->id);
return ipc_sm_state(IPC_SM_SLW_TX_WR_CMD);
} else {
dev_dbg(&context->sdev->dev,
"link %d: tx queue empty\n", context->link->id);
return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
}
}
static const struct ipc_sm_state *sm_act_rx_wr_dat_aud_exit(u8 event,
struct ipc_link_context *context)
{
u32 frame_hdr;
unsigned char l2_header;
unsigned int l2_length;
u8 *l2_data;
if (event == IPC_SM_RUN_RESET)
return ipc_sm_state(IPC_SM_RESET_AUD);
dev_dbg(&context->sdev->dev,
"link %d: RX PAYLOAD %d bytes\n",
context->link->id, context->frame->len);
/* decode L2 header */
frame_hdr = *(u32 *)context->frame->data;
l2_header = ipc_util_get_l2_channel(frame_hdr);
l2_length = ipc_util_get_l2_length(frame_hdr);
l2_data = (u8 *)context->frame->data + IPC_L2_HDR_SIZE;
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
/* frame is received, increment link rx counter */
context->rx_bytes += l2_length;
#endif
if (frame_hdr != 0)
context->frame->actual_len = l2_length + IPC_L2_HDR_SIZE;
else
context->frame->actual_len = 0;
ipc_dbg_dump_frame(&context->sdev->dev, context->link->id,
context->frame, false);
if (l2_length > (context->frame->len - 4))
dev_err(&context->sdev->dev,
"link %d: suspicious frame: L1 len %d L2 len %d\n",
context->link->id, context->frame->len, l2_length);
dev_dbg(&context->sdev->dev,
"link %d: L2 PDU decode: header 0x%08x channel %d length %d "
"data[%02x%02x%02x...]\n",
context->link->id, frame_hdr, l2_header, l2_length,
l2_data[0], l2_data[1], l2_data[2]);
if (ipc_util_channel_is_loopback(l2_header))
ipc_dbg_verify_rx_frame(context);
/* did the slave actually have anything to send? */
if (frame_hdr != 0) {
/* pass received frame up to L2mux layer */
if (!modem_protocol_channel_is_open(l2_header)) {
dev_err(&context->sdev->dev,
"link %d error: received frame on invalid "
"channel %d, frame discarded\n",
context->link->id, l2_header);
} else {
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
/*
* Discard loopback frames if we are taking throughput
* measurements - we'll be loading the links and so will
* likely overload the buffers.
*/
if (!ipc_util_channel_is_loopback(l2_header))
#endif
modem_m6718_spi_receive(context->sdev,
l2_header, l2_length, l2_data);
}
} else {
dev_dbg(&context->sdev->dev,
"link %d: received dummy frame, discarding\n",
context->link->id);
}
/* data is copied by L2mux so free the frame here */
ipc_queue_delete_frame(context->frame);
context->frame = NULL;
/* audio link goes idle ready for next transaction */
return ipc_sm_state(IPC_SM_IDL_AUD);
}
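/*
 * Summary comment (added for clarity): a normal audio-link transaction
 * cycles IDL_AUD -> SLW_TX_WR_DAT_AUD -> ACT_TX_WR_DAT_AUD ->
 * SLW_RX_WR_DAT_AUD -> ACT_RX_WR_DAT_AUD -> IDL_AUD, i.e. the master
 * always sends a data frame (a dummy one if its tx queue is empty) and
 * then receives one (which may be a dummy frame from the slave) before
 * going idle again.
 */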
static u8 sm_halt_enter(u8 event, struct ipc_link_context *context)
{
dev_err(&context->sdev->dev,
"link %d error: HALTED\n", context->link->id);
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
/*
* Force modem reset, this will cause a reset event from the modemstate
* driver which will reset the links. If debugfs is enabled then there
* is a userspace file which controls whether MSR is enabled or not.
*/
#ifdef CONFIG_DEBUG_FS
if (l1_context.msr_disable) {
dev_info(&context->sdev->dev,
"link %d: MSR is disabled by user, "
"not requesting modem reset\n", context->link->id);
return IPC_SM_RUN_RESET;
}
#endif
modem_state_force_reset();
#endif
return IPC_SM_RUN_RESET;
}
static const struct ipc_sm_state *sm_halt_exit(u8 event,
struct ipc_link_context *context)
{
return ipc_sm_state(IPC_SM_RESET);
}
static const struct ipc_sm_state *sm_halt_aud_exit(u8 event,
struct ipc_link_context *context)
{
return ipc_sm_state(IPC_SM_RESET_AUD);
}
static u8 sm_reset_enter(u8 event, struct ipc_link_context *context)
{
dev_err(&context->sdev->dev,
"link %d resetting\n", context->link->id);
if (context->link->id == 0)
ipc_broadcast_modem_reset(context);
ipc_util_deactivate_ss(context);
ipc_queue_reset(context);
if (context->frame != NULL) {
ipc_queue_delete_frame(context->frame);
context->frame = NULL;
}
#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
if (context->last_frame != NULL) {
ipc_queue_delete_frame(context->last_frame);
context->last_frame = NULL;
}
#endif
dev_dbg(&context->sdev->dev,
"link %d reset completed\n", context->link->id);
return IPC_SM_RUN_RESET;
}
static const struct ipc_sm_state *sm_reset_exit(u8 event,
struct ipc_link_context *context)
{
return ipc_sm_state(IPC_SM_INIT);
}
static const struct ipc_sm_state *sm_reset_aud_exit(u8 event,
struct ipc_link_context *context)
{
return ipc_sm_state(IPC_SM_INIT_AUD);
}
static u8 sm_slw_tx_bootreq_enter(u8 event, struct ipc_link_context *context)
{
dev_info(&context->sdev->dev,
"link %d: waiting for boot sync\n", context->link->id);
ipc_util_activate_ss(context);
context->cmd = ipc_util_make_l1_header(CMD_BOOTREQ, 0,
IPC_DRIVER_VERSION);
dev_dbg(&context->sdev->dev,
"link %d: TX HEADER cmd %08x (type %x)\n",
context->link->id,
context->cmd,
ipc_util_get_l1_cmd(context->cmd));
ipc_util_spi_message_prepare(context, &context->cmd,
NULL, IPC_L1_HDR_SIZE);
/* wait now for the slave to indicate ready... */
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_slw_tx_bootreq_exit(u8 event,
struct ipc_link_context *context)
{
return ipc_sm_state(IPC_SM_ACT_TX_BOOTREQ);
}
static u8 sm_act_tx_bootreq_enter(u8 event, struct ipc_link_context *context)
{
int err;
/* slave is ready - start the spi transfer */
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_act_tx_bootreq_exit(u8 event,
struct ipc_link_context *context)
{
return ipc_sm_state(IPC_SM_SLW_RX_BOOTRESP);
}
static u8 sm_slw_rx_bootresp_enter(u8 event, struct ipc_link_context *context)
{
/* prepare to receive BOOTRESP frame header */
ipc_util_spi_message_prepare(context, NULL,
&context->cmd, IPC_L1_HDR_SIZE);
/* slave might already have signalled ready to transmit */
if (atomic_read(&context->state_int)) {
dev_dbg(&context->sdev->dev,
"link %d: slave has already signalled ready\n",
context->link->id);
ipc_util_activate_ss(context);
return IPC_SM_RUN_SLAVE_IRQ;
} else {
ipc_util_activate_ss_with_tmo(context);
return IPC_SM_RUN_NONE;
}
}
static const struct ipc_sm_state *sm_slw_rx_bootresp_exit(u8 event,
struct ipc_link_context *context)
{
if (event == IPC_SM_RUN_COMMS_TMO) {
/*
* Modem timeout: was it really ready or just noise?
* Revert to waiting for handshake to start.
*/
ipc_util_deactivate_ss(context);
return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
} else {
return ipc_sm_state(IPC_SM_ACT_RX_BOOTRESP);
}
}
static u8 sm_act_rx_bootresp_enter(u8 event, struct ipc_link_context *context)
{
int err;
/* slave is ready - start the spi transfer */
dev_dbg(&context->sdev->dev,
"link %d: starting spi tfr\n", context->link->id);
err = spi_async(context->sdev, &context->spi_message);
if (err < 0) {
dev_err(&context->sdev->dev,
"link %d error: spi tfr start failed, error %d\n",
context->link->id, err);
return IPC_SM_RUN_ABORT;
}
return IPC_SM_RUN_NONE;
}
static const struct ipc_sm_state *sm_act_rx_bootresp_exit(u8 event,
struct ipc_link_context *context)
{
u8 cmd_type = ipc_util_get_l1_cmd(context->cmd);
u8 modem_ver;
dev_dbg(&context->sdev->dev,
"link %d: RX HEADER %08x (type %d)\n",
context->link->id, context->cmd, cmd_type);
if (cmd_type == CMD_BOOTRESP) {
modem_ver = ipc_util_get_l1_bootresp_ver(context->cmd);
dev_info(&context->sdev->dev,
"link %d: boot sync done; "
"APE version %02x, MODEM version %02x\n",
context->link->id, IPC_DRIVER_VERSION, modem_ver);
/* check for minimum required modem version */
if (modem_ver < IPC_DRIVER_MODEM_MIN_VER) {
dev_warn(&context->sdev->dev,
"link %d warning: modem version mismatch! "
"minimum required version is %02x\n",
context->link->id,
IPC_DRIVER_MODEM_MIN_VER);
}
return ipc_sm_state(IPC_SM_WAIT_HANDSHAKE_INACTIVE);
} else {
/* invalid response... this is not our slave */
dev_err(&context->sdev->dev,
"link %d error: expected %x (BOOTRESP), received %x.\n",
context->link->id,
CMD_BOOTRESP,
cmd_type);
return ipc_sm_state(IPC_SM_HALT);
}
}
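/*
 * Summary comment (added for clarity): the boot handshake on link 0 runs
 * SLW_TX_BOOTREQ -> ACT_TX_BOOTREQ -> SLW_RX_BOOTRESP -> ACT_RX_BOOTRESP;
 * a valid BOOTRESP then leads to WAIT_HANDSHAKE_INACTIVE and finally IDL,
 * while any other response halts the link. A comms timeout while waiting
 * for BOOTRESP simply drops back to SLW_TX_BOOTREQ.
 */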
/* the driver protocol state machine */
static const struct ipc_sm_state state_machine[IPC_SM_STATE_ID_NBR] = {
[IPC_SM_INIT] = {
.id = IPC_SM_INIT,
.enter = sm_init_enter,
.exit = sm_init_exit,
.events = IPC_SM_RUN_INIT | IPC_SM_RUN_RESET
},
[IPC_SM_HALT] = {
.id = IPC_SM_HALT,
.enter = sm_halt_enter,
.exit = sm_halt_exit,
.events = IPC_SM_RUN_RESET
},
[IPC_SM_RESET] = {
.id = IPC_SM_RESET,
.enter = sm_reset_enter,
.exit = sm_reset_exit,
.events = IPC_SM_RUN_RESET
},
[IPC_SM_WAIT_SLAVE_STABLE] = {
.id = IPC_SM_WAIT_SLAVE_STABLE,
.enter = sm_wait_slave_stable_enter,
.exit = sm_wait_slave_stable_exit,
.events = IPC_SM_RUN_STABLE_TMO
},
[IPC_SM_WAIT_HANDSHAKE_INACTIVE] = {
.id = IPC_SM_WAIT_HANDSHAKE_INACTIVE,
.enter = sm_wait_handshake_inactive_enter,
.exit = sm_wait_handshake_inactive_exit,
.events = IPC_SM_RUN_STABLE_TMO
},
[IPC_SM_SLW_TX_BOOTREQ] = {
.id = IPC_SM_SLW_TX_BOOTREQ,
.enter = sm_slw_tx_bootreq_enter,
.exit = sm_slw_tx_bootreq_exit,
.events = IPC_SM_RUN_SLAVE_IRQ
},
[IPC_SM_ACT_TX_BOOTREQ] = {
.id = IPC_SM_ACT_TX_BOOTREQ,
.enter = sm_act_tx_bootreq_enter,
.exit = sm_act_tx_bootreq_exit,
.events = IPC_SM_RUN_TFR_COMPLETE
},
[IPC_SM_SLW_RX_BOOTRESP] = {
.id = IPC_SM_SLW_RX_BOOTRESP,
.enter = sm_slw_rx_bootresp_enter,
.exit = sm_slw_rx_bootresp_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO
},
[IPC_SM_ACT_RX_BOOTRESP] = {
.id = IPC_SM_ACT_RX_BOOTRESP,
.enter = sm_act_rx_bootresp_enter,
.exit = sm_act_rx_bootresp_exit,
.events = IPC_SM_RUN_TFR_COMPLETE
},
[IPC_SM_IDL] = {
.id = IPC_SM_IDL,
.enter = sm_idl_enter,
.exit = sm_idl_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_TX_REQ |
IPC_SM_RUN_RESET
},
[IPC_SM_SLW_TX_WR_CMD] = {
.id = IPC_SM_SLW_TX_WR_CMD,
.enter = sm_slw_tx_wr_cmd_enter,
.exit = sm_slw_tx_wr_cmd_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
IPC_SM_RUN_RESET
},
[IPC_SM_ACT_TX_WR_CMD] = {
.id = IPC_SM_ACT_TX_WR_CMD,
.enter = sm_act_tx_wr_cmd_enter,
.exit = sm_act_tx_wr_cmd_exit,
.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
},
[IPC_SM_SLW_TX_WR_DAT] = {
.id = IPC_SM_SLW_TX_WR_DAT,
.enter = sm_slw_tx_wr_dat_enter,
.exit = sm_slw_tx_wr_dat_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
IPC_SM_RUN_RESET
},
[IPC_SM_ACT_TX_WR_DAT] = {
.id = IPC_SM_ACT_TX_WR_DAT,
.enter = sm_act_tx_wr_dat_enter,
.exit = sm_act_tx_wr_dat_exit,
.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
},
[IPC_SM_SLW_TX_RD_CMD] = {
.id = IPC_SM_SLW_TX_RD_CMD,
.enter = sm_slw_tx_rd_cmd_enter,
.exit = sm_slw_tx_rd_cmd_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
IPC_SM_RUN_RESET
},
[IPC_SM_ACT_TX_RD_CMD] = {
.id = IPC_SM_ACT_TX_RD_CMD,
.enter = sm_act_tx_rd_cmd_enter,
.exit = sm_act_tx_rd_cmd_exit,
.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
},
[IPC_SM_SLW_RX_WR_CMD] = {
.id = IPC_SM_SLW_RX_WR_CMD,
.enter = sm_slw_rx_wr_cmd_enter,
.exit = sm_slw_rx_wr_cmd_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
IPC_SM_RUN_RESET
},
[IPC_SM_ACT_RX_WR_CMD] = {
.id = IPC_SM_ACT_RX_WR_CMD,
.enter = sm_act_rx_wr_cmd_enter,
.exit = sm_act_rx_wr_cmd_exit,
.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
},
[IPC_SM_ACT_RX_WR_DAT] = {
.id = IPC_SM_ACT_RX_WR_DAT,
.enter = sm_act_rx_wr_dat_enter,
.exit = sm_act_rx_wr_dat_exit,
.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
},
/* audio link states below */
[IPC_SM_INIT_AUD] = {
.id = IPC_SM_INIT_AUD,
.enter = sm_init_enter,
.exit = sm_init_aud_exit,
.events = IPC_SM_RUN_INIT | IPC_SM_RUN_RESET
},
[IPC_SM_HALT_AUD] = {
.id = IPC_SM_HALT_AUD,
.enter = sm_halt_enter,
.exit = sm_halt_aud_exit,
.events = IPC_SM_RUN_RESET
},
[IPC_SM_RESET_AUD] = {
.id = IPC_SM_RESET_AUD,
.enter = sm_reset_enter,
.exit = sm_reset_aud_exit,
.events = IPC_SM_RUN_RESET
},
[IPC_SM_IDL_AUD] = {
.id = IPC_SM_IDL_AUD,
.enter = sm_idl_enter,
.exit = sm_idl_aud_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_TX_REQ |
IPC_SM_RUN_RESET
},
[IPC_SM_SLW_TX_WR_DAT_AUD] = {
.id = IPC_SM_SLW_TX_WR_DAT_AUD,
.enter = sm_slw_tx_wr_dat_aud_enter,
.exit = sm_slw_tx_wr_dat_aud_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
IPC_SM_RUN_RESET
},
[IPC_SM_ACT_TX_WR_DAT_AUD] = {
.id = IPC_SM_ACT_TX_WR_DAT_AUD,
.enter = sm_act_tx_wr_dat_enter,
.exit = sm_act_tx_wr_dat_aud_exit,
.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
},
[IPC_SM_SLW_RX_WR_DAT_AUD] = {
.id = IPC_SM_SLW_RX_WR_DAT_AUD,
.enter = sm_slw_rx_wr_dat_aud_enter,
.exit = sm_slw_rx_wr_dat_aud_exit,
.events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
IPC_SM_RUN_RESET
},
[IPC_SM_ACT_RX_WR_DAT_AUD] = {
.id = IPC_SM_ACT_RX_WR_DAT_AUD,
.enter = sm_act_rx_wr_dat_aud_enter,
.exit = sm_act_rx_wr_dat_aud_exit,
.events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
}
};
const struct ipc_sm_state *ipc_sm_idle_state(struct ipc_link_context *context)
{
if (context->link->id == IPC_LINK_AUDIO)
return ipc_sm_state(IPC_SM_IDL_AUD);
else
return ipc_sm_state(IPC_SM_IDL);
}
const struct ipc_sm_state *ipc_sm_init_state(struct ipc_link_context *context)
{
if (context->link->id == IPC_LINK_AUDIO)
return ipc_sm_state(IPC_SM_INIT_AUD);
else
return ipc_sm_state(IPC_SM_INIT);
}
const struct ipc_sm_state *ipc_sm_state(u8 id)
{
BUG_ON(id >= IPC_SM_STATE_ID_NBR);
return &state_machine[id];
}
bool ipc_sm_valid_for_state(u8 event, const struct ipc_sm_state *state)
{
return (state->events & event) == event;
}
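/*
 * Example (added for clarity): an event is accepted only if all of its
 * bits are present in the state's event mask. In IPC_SM_IDL the mask is
 * IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_TX_REQ | IPC_SM_RUN_RESET, so e.g. a
 * stray IPC_SM_RUN_TFR_COMPLETE arriving while idle is ignored by
 * state_machine_run() below.
 */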
static void state_machine_run(struct ipc_link_context *context, u8 event)
{
struct modem_m6718_spi_link_platform_data *link;
struct spi_device *sdev;
const struct ipc_sm_state *cur_state;
/* some sanity checking; check context before dereferencing it */
if (context == NULL || context->link == NULL || context->state == NULL) {
pr_err("M6718 IPC protocol error: "
"inconsistent driver state, ignoring event\n");
return;
}
link = context->link;
sdev = context->sdev;
cur_state = context->state;
dev_dbg(&sdev->dev, "link %d: RUNNING in %s (%s)\n", link->id,
ipc_dbg_state_id(cur_state), ipc_dbg_event(event));
/* valid trigger event for current state? */
if (!ipc_sm_valid_for_state(event, cur_state)) {
dev_dbg(&sdev->dev,
"link %d: ignoring invalid event\n", link->id);
ipc_dbg_ignoring_event(context, event);
return;
}
ipc_dbg_handling_event(context, event);
/* run machine while state entry functions trigger new changes */
do {
if (event == IPC_SM_RUN_SLAVE_IRQ &&
!ipc_util_int_is_active(context)) {
dev_err(&sdev->dev,
"link %d error: slave is not ready! (%s)",
link->id,
ipc_dbg_state_id(cur_state));
}
if (event == IPC_SM_RUN_ABORT) {
dev_err(&sdev->dev,
"link %d error: abort event\n", link->id);
/* reset state to idle */
context->state = ipc_sm_idle_state(context);
break;
} else {
/* exit current state */
dev_dbg(&sdev->dev, "link %d: exit %s (%s)\n",
link->id, ipc_dbg_state_id(cur_state),
ipc_dbg_event(event));
cur_state = cur_state->exit(event, context);
context->state = cur_state;
}
/* reset state of slave irq to prepare for next event */
if (event == IPC_SM_RUN_SLAVE_IRQ)
atomic_set(&context->state_int, 0);
/* enter new state */
dev_dbg(&sdev->dev, "link %d: enter %s (%s)\n", link->id,
ipc_dbg_state_id(cur_state), ipc_dbg_event(event));
event = context->state->enter(event, context);
ipc_dbg_entering_state(context);
} while (event != IPC_SM_RUN_NONE);
dev_dbg(&sdev->dev, "link %d: STOPPED in %s\n", link->id,
ipc_dbg_state_id(cur_state));
}
void ipc_sm_kick(u8 event, struct ipc_link_context *context)
{
unsigned long flags;
struct modem_m6718_spi_link_platform_data *link = context->link;
struct spi_device *sdev = context->sdev;
struct spi_message *msg = &context->spi_message;
u8 i;
spin_lock_irqsave(&context->sm_lock, flags);
switch (event) {
case IPC_SM_RUN_SLAVE_IRQ:
dev_dbg(&sdev->dev,
"link %d EVENT: slave-ready irq\n", link->id);
del_timer(&context->comms_timer);
atomic_set(&context->state_int,
ipc_util_int_is_active(context));
break;
case IPC_SM_RUN_TFR_COMPLETE:
dev_dbg(&sdev->dev,
"link %d EVENT: spi tfr complete (status %d len %d)\n",
link->id, msg->status, msg->actual_length);
ipc_dbg_dump_spi_tfr(context);
break;
case IPC_SM_RUN_COMMS_TMO:
{
char *statestr;
struct ipc_link_context *contexts = l1_context.device_context;
statestr = ipc_dbg_link_state_str(context);
dev_err(&sdev->dev,
"link %d EVENT: modem comms timeout (%s)!\n",
link->id, ipc_dbg_state_id(context->state));
if (statestr != NULL) {
dev_err(&sdev->dev, "%s", statestr);
kfree(statestr);
}
/* cancel all link timeout timers except this one */
for (i = 0; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
if (contexts[i].link->id != link->id)
del_timer(&contexts[i].comms_timer);
break;
}
case IPC_SM_RUN_STABLE_TMO:
dev_dbg(&sdev->dev,
"link %d EVENT: slave-stable timeout\n", link->id);
break;
case IPC_SM_RUN_RESET:
dev_dbg(&sdev->dev,
"link %d EVENT: reset\n", link->id);
del_timer(&context->comms_timer);
break;
default:
break;
}
if (!ipc_util_link_is_suspended(context))
state_machine_run(context, event);
else
dev_dbg(&sdev->dev,
"link %d is suspended, waiting for resume\n", link->id);
spin_unlock_irqrestore(&context->sm_lock, flags);
}
| janrinze/snowballkernel | drivers/modem/m6718_spi/statemachine.c | C | gpl-2.0 | 40,413 |
/*
* Virtual Raw MIDI client on Sequencer
*
* Copyright (c) 2000 by Takashi Iwai <tiwai@suse.de>,
* Jaroslav Kysela <perex@perex.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* Virtual Raw MIDI client
*
* The virtual rawmidi client is a sequencer client that is associated
* with a rawmidi device file. The created rawmidi device file can be
* accessed as a normal raw MIDI device, but its MIDI source and
* destination are arbitrary. For example, a user-client software synth
* connected to this port can be used as a normal MIDI device as well.
*
* The virtual rawmidi device also accepts multiple opens. Each file has
* its own input buffer, so no conflicts occur. Draining the input/output
* buffer affects only the local buffer.
*
*/
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/minors.h>
#include <sound/seq_kernel.h>
#include <sound/seq_midi_event.h>
#include <sound/seq_virmidi.h>
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("Virtual Raw MIDI client on Sequencer");
MODULE_LICENSE("GPL");
/*
* initialize an event record
*/
static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
struct snd_seq_event *ev)
{
memset(ev, 0, sizeof(*ev));
ev->source.port = vmidi->port;
switch (vmidi->seq_mode) {
case SNDRV_VIRMIDI_SEQ_DISPATCH:
ev->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
break;
case SNDRV_VIRMIDI_SEQ_ATTACH:
/* FIXME: source and destination are same - not good.. */
ev->dest.client = vmidi->client;
ev->dest.port = vmidi->port;
break;
}
ev->type = SNDRV_SEQ_EVENT_NONE;
}
/*
* decode input event and put to read buffer of each opened file
*/
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
struct snd_seq_event *ev)
{
struct snd_virmidi *vmidi;
unsigned char msg[4];
int len;
read_lock(&rdev->filelist_lock);
list_for_each_entry(vmidi, &rdev->filelist, list) {
if (!vmidi->trigger)
continue;
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
continue;
snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
} else {
len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
if (len > 0)
snd_rawmidi_receive(vmidi->substream, msg, len);
}
}
read_unlock(&rdev->filelist_lock);
return 0;
}
/*
* receive an event from the remote virmidi port
*
* for rawmidi inputs, you can call this function from the event
* handler of a remote port which is attached to the virmidi via
* SNDRV_VIRMIDI_SEQ_ATTACH.
*/
#if 0
int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
{
struct snd_virmidi_dev *rdev;
rdev = rmidi->private_data;
return snd_virmidi_dev_receive_event(rdev, ev);
}
#endif /* 0 */
/*
* event handler of virmidi port
*/
static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
void *private_data, int atomic, int hop)
{
struct snd_virmidi_dev *rdev;
rdev = private_data;
if (!(rdev->flags & SNDRV_VIRMIDI_USE))
return 0; /* ignored */
return snd_virmidi_dev_receive_event(rdev, ev);
}
/*
* trigger rawmidi stream for input
*/
static void snd_virmidi_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
if (up) {
vmidi->trigger = 1;
} else {
vmidi->trigger = 0;
}
}
/*
* trigger rawmidi stream for output
*/
static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
int count, res;
unsigned char buf[32], *pbuf;
if (up) {
vmidi->trigger = 1;
if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
!(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
return; /* ignored */
}
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
return;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
while (1) {
count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
if (count <= 0)
break;
pbuf = buf;
while (count > 0) {
res = snd_midi_event_encode(vmidi->parser, pbuf, count, &vmidi->event);
if (res < 0) {
snd_midi_event_reset_encode(vmidi->parser);
continue;
}
snd_rawmidi_transmit_ack(substream, res);
pbuf += res;
count -= res;
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
return;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
}
}
} else {
vmidi->trigger = 0;
}
}
/*
* open rawmidi handle for input
*/
static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
{
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_rawmidi_runtime *runtime = substream->runtime;
struct snd_virmidi *vmidi;
unsigned long flags;
vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
if (vmidi == NULL)
return -ENOMEM;
vmidi->substream = substream;
if (snd_midi_event_new(0, &vmidi->parser) < 0) {
kfree(vmidi);
return -ENOMEM;
}
vmidi->seq_mode = rdev->seq_mode;
vmidi->client = rdev->client;
vmidi->port = rdev->port;
runtime->private_data = vmidi;
write_lock_irqsave(&rdev->filelist_lock, flags);
list_add_tail(&vmidi->list, &rdev->filelist);
write_unlock_irqrestore(&rdev->filelist_lock, flags);
vmidi->rdev = rdev;
return 0;
}
/*
* open rawmidi handle for output
*/
static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
{
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_rawmidi_runtime *runtime = substream->runtime;
struct snd_virmidi *vmidi;
vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
if (vmidi == NULL)
return -ENOMEM;
vmidi->substream = substream;
if (snd_midi_event_new(MAX_MIDI_EVENT_BUF, &vmidi->parser) < 0) {
kfree(vmidi);
return -ENOMEM;
}
vmidi->seq_mode = rdev->seq_mode;
vmidi->client = rdev->client;
vmidi->port = rdev->port;
snd_virmidi_init_event(vmidi, &vmidi->event);
vmidi->rdev = rdev;
runtime->private_data = vmidi;
return 0;
}
/*
* close rawmidi handle for input
*/
static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
snd_midi_event_free(vmidi->parser);
list_del(&vmidi->list);
substream->runtime->private_data = NULL;
kfree(vmidi);
return 0;
}
/*
* close rawmidi handle for output
*/
static int snd_virmidi_output_close(struct snd_rawmidi_substream *substream)
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
return 0;
}
/*
* subscribe callback - allow output to rawmidi device
*/
static int snd_virmidi_subscribe(void *private_data,
struct snd_seq_port_subscribe *info)
{
struct snd_virmidi_dev *rdev;
rdev = private_data;
if (!try_module_get(rdev->card->module))
return -EFAULT;
rdev->flags |= SNDRV_VIRMIDI_SUBSCRIBE;
return 0;
}
/*
* unsubscribe callback - disallow output to rawmidi device
*/
static int snd_virmidi_unsubscribe(void *private_data,
struct snd_seq_port_subscribe *info)
{
struct snd_virmidi_dev *rdev;
rdev = private_data;
rdev->flags &= ~SNDRV_VIRMIDI_SUBSCRIBE;
module_put(rdev->card->module);
return 0;
}
/*
* use callback - allow input to rawmidi device
*/
static int snd_virmidi_use(void *private_data,
struct snd_seq_port_subscribe *info)
{
struct snd_virmidi_dev *rdev;
rdev = private_data;
if (!try_module_get(rdev->card->module))
return -EFAULT;
rdev->flags |= SNDRV_VIRMIDI_USE;
return 0;
}
/*
* unuse callback - disallow input to rawmidi device
*/
static int snd_virmidi_unuse(void *private_data,
struct snd_seq_port_subscribe *info)
{
struct snd_virmidi_dev *rdev;
rdev = private_data;
rdev->flags &= ~SNDRV_VIRMIDI_USE;
module_put(rdev->card->module);
return 0;
}
/*
* Register functions
*/
static struct snd_rawmidi_ops snd_virmidi_input_ops = {
.open = snd_virmidi_input_open,
.close = snd_virmidi_input_close,
.trigger = snd_virmidi_input_trigger,
};
static struct snd_rawmidi_ops snd_virmidi_output_ops = {
.open = snd_virmidi_output_open,
.close = snd_virmidi_output_close,
.trigger = snd_virmidi_output_trigger,
};
/*
* create a sequencer client and a port
*/
static int snd_virmidi_dev_attach_seq(struct snd_virmidi_dev *rdev)
{
int client;
struct snd_seq_port_callback pcallbacks;
struct snd_seq_port_info *pinfo;
int err;
if (rdev->client >= 0)
return 0;
pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
if (!pinfo) {
err = -ENOMEM;
goto __error;
}
client = snd_seq_create_kernel_client(rdev->card, rdev->device,
"%s %d-%d", rdev->rmidi->name,
rdev->card->number,
rdev->device);
if (client < 0) {
err = client;
goto __error;
}
rdev->client = client;
/* create a port */
pinfo->addr.client = client;
sprintf(pinfo->name, "VirMIDI %d-%d", rdev->card->number, rdev->device);
/* set all capabilities */
pinfo->capability |= SNDRV_SEQ_PORT_CAP_WRITE | SNDRV_SEQ_PORT_CAP_SYNC_WRITE | SNDRV_SEQ_PORT_CAP_SUBS_WRITE;
pinfo->capability |= SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SYNC_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
pinfo->capability |= SNDRV_SEQ_PORT_CAP_DUPLEX;
pinfo->type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC
| SNDRV_SEQ_PORT_TYPE_SOFTWARE
| SNDRV_SEQ_PORT_TYPE_PORT;
pinfo->midi_channels = 16;
memset(&pcallbacks, 0, sizeof(pcallbacks));
pcallbacks.owner = THIS_MODULE;
pcallbacks.private_data = rdev;
pcallbacks.subscribe = snd_virmidi_subscribe;
pcallbacks.unsubscribe = snd_virmidi_unsubscribe;
pcallbacks.use = snd_virmidi_use;
pcallbacks.unuse = snd_virmidi_unuse;
pcallbacks.event_input = snd_virmidi_event_input;
pinfo->kernel = &pcallbacks;
err = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, pinfo);
if (err < 0) {
snd_seq_delete_kernel_client(client);
rdev->client = -1;
goto __error;
}
rdev->port = pinfo->addr.port;
err = 0; /* success */
__error:
kfree(pinfo);
return err;
}
/*
* release the sequencer client
*/
static void snd_virmidi_dev_detach_seq(struct snd_virmidi_dev *rdev)
{
if (rdev->client >= 0) {
snd_seq_delete_kernel_client(rdev->client);
rdev->client = -1;
}
}
/*
* register the device
*/
static int snd_virmidi_dev_register(struct snd_rawmidi *rmidi)
{
struct snd_virmidi_dev *rdev = rmidi->private_data;
int err;
switch (rdev->seq_mode) {
case SNDRV_VIRMIDI_SEQ_DISPATCH:
err = snd_virmidi_dev_attach_seq(rdev);
if (err < 0)
return err;
break;
case SNDRV_VIRMIDI_SEQ_ATTACH:
if (rdev->client == 0)
return -EINVAL;
/* should check presence of port more strictly.. */
break;
default:
snd_printk(KERN_ERR "seq_mode is not set: %d\n", rdev->seq_mode);
return -EINVAL;
}
return 0;
}
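/*
 * Note (added for clarity): in SNDRV_VIRMIDI_SEQ_DISPATCH mode the
 * sequencer client/port pair is created here at registration time, while
 * in SNDRV_VIRMIDI_SEQ_ATTACH mode the caller is expected to have filled
 * in rdev->client (and rdev->port) with an existing client beforehand;
 * snd_virmidi_new() defaults seq_mode to SNDRV_VIRMIDI_SEQ_DISPATCH.
 */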
/*
* unregister the device
*/
static int snd_virmidi_dev_unregister(struct snd_rawmidi *rmidi)
{
struct snd_virmidi_dev *rdev = rmidi->private_data;
if (rdev->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH)
snd_virmidi_dev_detach_seq(rdev);
return 0;
}
/*
*
*/
static struct snd_rawmidi_global_ops snd_virmidi_global_ops = {
.dev_register = snd_virmidi_dev_register,
.dev_unregister = snd_virmidi_dev_unregister,
};
/*
* free device
*/
static void snd_virmidi_free(struct snd_rawmidi *rmidi)
{
struct snd_virmidi_dev *rdev = rmidi->private_data;
kfree(rdev);
}
/*
* create a new device
*
*/
/* exported */
int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmidi)
{
struct snd_rawmidi *rmidi;
struct snd_virmidi_dev *rdev;
int err;
*rrmidi = NULL;
if ((err = snd_rawmidi_new(card, "VirMidi", device,
16, /* may be configurable */
16, /* may be configurable */
&rmidi)) < 0)
return err;
strcpy(rmidi->name, rmidi->id);
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (rdev == NULL) {
snd_device_free(card, rmidi);
return -ENOMEM;
}
rdev->card = card;
rdev->rmidi = rmidi;
rdev->device = device;
rdev->client = -1;
rwlock_init(&rdev->filelist_lock);
INIT_LIST_HEAD(&rdev->filelist);
rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
rmidi->private_data = rdev;
rmidi->private_free = snd_virmidi_free;
rmidi->ops = &snd_virmidi_global_ops;
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_virmidi_input_ops);
snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_virmidi_output_ops);
rmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT |
SNDRV_RAWMIDI_INFO_OUTPUT |
SNDRV_RAWMIDI_INFO_DUPLEX;
*rrmidi = rmidi;
return 0;
}
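/*
 * Minimal usage sketch (illustrative only, not part of the original file;
 * names are hypothetical and error handling is trimmed). A card driver
 * would typically create the virtual rawmidi device from its probe
 * routine and then register the card as usual:
 */
#if 0
static int example_probe(struct snd_card *card)
{
	struct snd_rawmidi *rmidi;
	int err;

	err = snd_virmidi_new(card, 0, &rmidi);
	if (err < 0)
		return err;
	/* default seq_mode is SNDRV_VIRMIDI_SEQ_DISPATCH, see snd_virmidi_new() */
	return snd_card_register(card);
}
#endif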
/*
* ENTRY functions
*/
static int __init alsa_virmidi_init(void)
{
return 0;
}
static void __exit alsa_virmidi_exit(void)
{
}
module_init(alsa_virmidi_init)
module_exit(alsa_virmidi_exit)
EXPORT_SYMBOL(snd_virmidi_new);
| Jackeagle/android_kernel_sony_c2305 | sound/core/seq/seq_virmidi.c | C | gpl-2.0 | 14,463 |
/*
* Copyright (C) 2014 Fraunhofer ITWM
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Written by:
* Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
*/
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <net/ieee802154.h>
#include <crypto/algapi.h>
#include "mac802154.h"
#include "llsec.h"
static void llsec_key_put(struct mac802154_llsec_key *key);
static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
const struct ieee802154_llsec_key_id *b);
static void llsec_dev_free(struct mac802154_llsec_device *dev);
void mac802154_llsec_init(struct mac802154_llsec *sec)
{
memset(sec, 0, sizeof(*sec));
memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);
INIT_LIST_HEAD(&sec->table.security_levels);
INIT_LIST_HEAD(&sec->table.devices);
INIT_LIST_HEAD(&sec->table.keys);
hash_init(sec->devices_short);
hash_init(sec->devices_hw);
rwlock_init(&sec->lock);
}
void mac802154_llsec_destroy(struct mac802154_llsec *sec)
{
struct ieee802154_llsec_seclevel *sl, *sn;
struct ieee802154_llsec_device *dev, *dn;
struct ieee802154_llsec_key_entry *key, *kn;
list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
struct mac802154_llsec_seclevel *msl;
msl = container_of(sl, struct mac802154_llsec_seclevel, level);
list_del(&sl->list);
kfree(msl);
}
list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
struct mac802154_llsec_device *mdev;
mdev = container_of(dev, struct mac802154_llsec_device, dev);
list_del(&dev->list);
llsec_dev_free(mdev);
}
list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
struct mac802154_llsec_key *mkey;
mkey = container_of(key->key, struct mac802154_llsec_key, key);
list_del(&key->list);
llsec_key_put(mkey);
kfree(key);
}
}
int mac802154_llsec_get_params(struct mac802154_llsec *sec,
struct ieee802154_llsec_params *params)
{
read_lock_bh(&sec->lock);
*params = sec->params;
read_unlock_bh(&sec->lock);
return 0;
}
int mac802154_llsec_set_params(struct mac802154_llsec *sec,
const struct ieee802154_llsec_params *params,
int changed)
{
write_lock_bh(&sec->lock);
if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
sec->params.enabled = params->enabled;
if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
sec->params.frame_counter = params->frame_counter;
if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
sec->params.out_level = params->out_level;
if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
sec->params.out_key = params->out_key;
if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
sec->params.default_key_source = params->default_key_source;
if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
sec->params.pan_id = params->pan_id;
if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
sec->params.hwaddr = params->hwaddr;
if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
sec->params.coord_hwaddr = params->coord_hwaddr;
if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
sec->params.coord_shortaddr = params->coord_shortaddr;
write_unlock_bh(&sec->lock);
return 0;
}
static struct mac802154_llsec_key*
llsec_key_alloc(const struct ieee802154_llsec_key *template)
{
const int authsizes[3] = { 4, 8, 16 };
struct mac802154_llsec_key *key;
int i;
key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key)
return NULL;
kref_init(&key->ref);
key->key = *template;
BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));
for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
CRYPTO_ALG_ASYNC);
if (!key->tfm[i])
goto err_tfm;
if (crypto_aead_setkey(key->tfm[i], template->key,
IEEE802154_LLSEC_KEY_SIZE))
goto err_tfm;
if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
goto err_tfm;
}
key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
if (!key->tfm0)
goto err_tfm;
if (crypto_blkcipher_setkey(key->tfm0, template->key,
IEEE802154_LLSEC_KEY_SIZE))
goto err_tfm0;
return key;
err_tfm0:
crypto_free_blkcipher(key->tfm0);
err_tfm:
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
if (key->tfm[i])
crypto_free_aead(key->tfm[i]);
kfree(key);
return NULL;
}
static void llsec_key_release(struct kref *ref)
{
struct mac802154_llsec_key *key;
int i;
key = container_of(ref, struct mac802154_llsec_key, ref);
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
crypto_free_aead(key->tfm[i]);
crypto_free_blkcipher(key->tfm0);
kfree(key);
}
static struct mac802154_llsec_key*
llsec_key_get(struct mac802154_llsec_key *key)
{
kref_get(&key->ref);
return key;
}
static void llsec_key_put(struct mac802154_llsec_key *key)
{
kref_put(&key->ref, llsec_key_release);
}
static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
const struct ieee802154_llsec_key_id *b)
{
if (a->mode != b->mode)
return false;
if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
return ieee802154_addr_equal(&a->device_addr, &b->device_addr);
if (a->id != b->id)
return false;
switch (a->mode) {
case IEEE802154_SCF_KEY_INDEX:
return true;
case IEEE802154_SCF_KEY_SHORT_INDEX:
return a->short_source == b->short_source;
case IEEE802154_SCF_KEY_HW_INDEX:
return a->extended_source == b->extended_source;
}
return false;
}
int mac802154_llsec_key_add(struct mac802154_llsec *sec,
const struct ieee802154_llsec_key_id *id,
const struct ieee802154_llsec_key *key)
{
struct mac802154_llsec_key *mkey = NULL;
struct ieee802154_llsec_key_entry *pos, *new;
if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
key->cmd_frame_ids)
return -EINVAL;
list_for_each_entry(pos, &sec->table.keys, list) {
if (llsec_key_id_equal(&pos->id, id))
return -EEXIST;
if (memcmp(pos->key->key, key->key,
IEEE802154_LLSEC_KEY_SIZE))
continue;
mkey = container_of(pos->key, struct mac802154_llsec_key, key);
/* Don't allow multiple instances of the same AES key to have
* different allowed frame types/command frame ids, as this is
* not possible in the 802.15.4 PIB.
*/
if (pos->key->frame_types != key->frame_types ||
pos->key->cmd_frame_ids != key->cmd_frame_ids)
return -EEXIST;
break;
}
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
if (!mkey)
mkey = llsec_key_alloc(key);
else
mkey = llsec_key_get(mkey);
if (!mkey)
goto fail;
new->id = *id;
new->key = &mkey->key;
list_add_rcu(&new->list, &sec->table.keys);
return 0;
fail:
kfree(new);
return -ENOMEM;
}
int mac802154_llsec_key_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_key_id *key)
{
struct ieee802154_llsec_key_entry *pos;
list_for_each_entry(pos, &sec->table.keys, list) {
struct mac802154_llsec_key *mkey;
mkey = container_of(pos->key, struct mac802154_llsec_key, key);
if (llsec_key_id_equal(&pos->id, key)) {
list_del_rcu(&pos->list);
llsec_key_put(mkey);
return 0;
}
}
return -ENOENT;
}
static bool llsec_dev_use_shortaddr(__le16 short_addr)
{
return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
short_addr != cpu_to_le16(0xffff);
}
static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
{
return ((__force u16) short_addr) << 16 | (__force u16) pan_id;
}
static u64 llsec_dev_hash_long(__le64 hwaddr)
{
return (__force u64) hwaddr;
}
static struct mac802154_llsec_device*
llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
__le16 pan_id)
{
struct mac802154_llsec_device *dev;
u32 key = llsec_dev_hash_short(short_addr, pan_id);
hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
if (dev->dev.short_addr == short_addr &&
dev->dev.pan_id == pan_id)
return dev;
}
return NULL;
}
static struct mac802154_llsec_device*
llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
{
struct mac802154_llsec_device *dev;
u64 key = llsec_dev_hash_long(hwaddr);
hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
if (dev->dev.hwaddr == hwaddr)
return dev;
}
return NULL;
}
static void llsec_dev_free(struct mac802154_llsec_device *dev)
{
struct ieee802154_llsec_device_key *pos, *pn;
struct mac802154_llsec_device_key *devkey;
list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) {
devkey = container_of(pos, struct mac802154_llsec_device_key,
devkey);
list_del(&pos->list);
kfree(devkey);
}
kfree(dev);
}
int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
const struct ieee802154_llsec_device *dev)
{
struct mac802154_llsec_device *entry;
u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
u64 hwkey = llsec_dev_hash_long(dev->hwaddr);
BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);
if ((llsec_dev_use_shortaddr(dev->short_addr) &&
llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
llsec_dev_find_long(sec, dev->hwaddr))
return -EEXIST;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->dev = *dev;
spin_lock_init(&entry->lock);
INIT_LIST_HEAD(&entry->dev.keys);
if (llsec_dev_use_shortaddr(dev->short_addr))
hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
else
INIT_HLIST_NODE(&entry->bucket_s);
hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
list_add_tail_rcu(&entry->dev.list, &sec->table.devices);
return 0;
}
static void llsec_dev_free_rcu(struct rcu_head *rcu)
{
llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu));
}
int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
{
struct mac802154_llsec_device *pos;
pos = llsec_dev_find_long(sec, device_addr);
if (!pos)
return -ENOENT;
hash_del_rcu(&pos->bucket_s);
hash_del_rcu(&pos->bucket_hw);
call_rcu(&pos->rcu, llsec_dev_free_rcu);
return 0;
}
static struct mac802154_llsec_device_key*
llsec_devkey_find(struct mac802154_llsec_device *dev,
const struct ieee802154_llsec_key_id *key)
{
struct ieee802154_llsec_device_key *devkey;
list_for_each_entry_rcu(devkey, &dev->dev.keys, list) {
if (!llsec_key_id_equal(key, &devkey->key_id))
continue;
return container_of(devkey, struct mac802154_llsec_device_key,
devkey);
}
return NULL;
}
int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
__le64 dev_addr,
const struct ieee802154_llsec_device_key *key)
{
struct mac802154_llsec_device *dev;
struct mac802154_llsec_device_key *devkey;
dev = llsec_dev_find_long(sec, dev_addr);
if (!dev)
return -ENOENT;
if (llsec_devkey_find(dev, &key->key_id))
return -EEXIST;
devkey = kmalloc(sizeof(*devkey), GFP_KERNEL);
if (!devkey)
return -ENOMEM;
devkey->devkey = *key;
list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys);
return 0;
}
int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
__le64 dev_addr,
const struct ieee802154_llsec_device_key *key)
{
struct mac802154_llsec_device *dev;
struct mac802154_llsec_device_key *devkey;
dev = llsec_dev_find_long(sec, dev_addr);
if (!dev)
return -ENOENT;
devkey = llsec_devkey_find(dev, &key->key_id);
if (!devkey)
return -ENOENT;
list_del_rcu(&devkey->devkey.list);
kfree_rcu(devkey, rcu);
return 0;
}
static struct mac802154_llsec_seclevel*
llsec_find_seclevel(const struct mac802154_llsec *sec,
const struct ieee802154_llsec_seclevel *sl)
{
struct ieee802154_llsec_seclevel *pos;
list_for_each_entry(pos, &sec->table.security_levels, list) {
if (pos->frame_type != sl->frame_type ||
(pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
pos->cmd_frame_id != sl->cmd_frame_id) ||
pos->device_override != sl->device_override ||
pos->sec_levels != sl->sec_levels)
continue;
return container_of(pos, struct mac802154_llsec_seclevel,
level);
}
return NULL;
}
int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
const struct ieee802154_llsec_seclevel *sl)
{
struct mac802154_llsec_seclevel *entry;
if (llsec_find_seclevel(sec, sl))
return -EEXIST;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->level = *sl;
list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);
return 0;
}
int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_seclevel *sl)
{
struct mac802154_llsec_seclevel *pos;
pos = llsec_find_seclevel(sec, sl);
if (!pos)
return -ENOENT;
list_del_rcu(&pos->level.list);
kfree_rcu(pos, rcu);
return 0;
}
static int llsec_recover_addr(struct mac802154_llsec *sec,
struct ieee802154_addr *addr)
{
__le16 caddr = sec->params.coord_shortaddr;
addr->pan_id = sec->params.pan_id;
if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
return -EINVAL;
} else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
addr->extended_addr = sec->params.coord_hwaddr;
addr->mode = IEEE802154_ADDR_LONG;
} else {
addr->short_addr = sec->params.coord_shortaddr;
addr->mode = IEEE802154_ADDR_SHORT;
}
return 0;
}
static struct mac802154_llsec_key*
llsec_lookup_key(struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
const struct ieee802154_addr *addr,
struct ieee802154_llsec_key_id *key_id)
{
struct ieee802154_addr devaddr = *addr;
u8 key_id_mode = hdr->sec.key_id_mode;
struct ieee802154_llsec_key_entry *key_entry;
struct mac802154_llsec_key *key;
if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
devaddr.mode == IEEE802154_ADDR_NONE) {
if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
devaddr.extended_addr = sec->params.coord_hwaddr;
devaddr.mode = IEEE802154_ADDR_LONG;
} else if (llsec_recover_addr(sec, &devaddr) < 0) {
return NULL;
}
}
list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
const struct ieee802154_llsec_key_id *id = &key_entry->id;
if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
continue;
if (id->mode != key_id_mode)
continue;
if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
if (ieee802154_addr_equal(&devaddr, &id->device_addr))
goto found;
} else {
if (id->id != hdr->sec.key_id)
continue;
if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
(key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
id->short_source == hdr->sec.short_src) ||
(key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
id->extended_source == hdr->sec.extended_src))
goto found;
}
}
return NULL;
found:
key = container_of(key_entry->key, struct mac802154_llsec_key, key);
if (key_id)
*key_id = key_entry->id;
return llsec_key_get(key);
}
static void llsec_geniv(u8 iv[16], __le64 addr,
const struct ieee802154_sechdr *sec)
{
__be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
__be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);
iv[0] = 1; /* L' = L - 1 = 1 */
memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
iv[13] = sec->level;
iv[14] = 0;
iv[15] = 1;
}
static int
llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key)
{
u8 iv[16];
struct scatterlist src;
struct blkcipher_desc req = {
.tfm = key->tfm0,
.info = iv,
.flags = 0,
};
llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
sg_init_one(&src, skb->data, skb->len);
return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len);
}
static struct crypto_aead*
llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
{
int i;
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
if (crypto_aead_authsize(key->tfm[i]) == authlen)
return key->tfm[i];
BUG();
}
static int
llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key)
{
u8 iv[16];
unsigned char *data;
int authlen, assoclen, datalen, rc;
struct scatterlist src, assoc[2], dst[2];
struct aead_request *req;
authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
if (!req)
return -ENOMEM;
sg_init_table(assoc, 2);
sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
assoclen = skb->mac_len;
data = skb_mac_header(skb) + skb->mac_len;
datalen = skb_tail_pointer(skb) - data;
if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
sg_set_buf(&assoc[1], data, 0);
} else {
sg_set_buf(&assoc[1], data, datalen);
assoclen += datalen;
datalen = 0;
}
sg_init_one(&src, data, datalen);
sg_init_table(dst, 2);
sg_set_buf(&dst[0], data, datalen);
sg_set_buf(&dst[1], skb_put(skb, authlen), authlen);
aead_request_set_callback(req, 0, NULL, NULL);
aead_request_set_assoc(req, assoc, assoclen);
aead_request_set_crypt(req, &src, dst, datalen, iv);
rc = crypto_aead_encrypt(req);
kfree(req);
return rc;
}
static int llsec_do_encrypt(struct sk_buff *skb,
const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key)
{
if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
return llsec_do_encrypt_unauth(skb, sec, hdr, key);
else
return llsec_do_encrypt_auth(skb, sec, hdr, key);
}
int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
struct ieee802154_hdr hdr;
int rc, authlen, hlen;
struct mac802154_llsec_key *key;
u32 frame_ctr;
hlen = ieee802154_hdr_pull(skb, &hdr);
if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
return -EINVAL;
if (!hdr.fc.security_enabled || hdr.sec.level == 0) {
skb_push(skb, hlen);
return 0;
}
authlen = ieee802154_sechdr_authtag_len(&hdr.sec);
if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
return -EMSGSIZE;
rcu_read_lock();
read_lock_bh(&sec->lock);
if (!sec->params.enabled) {
rc = -EINVAL;
goto fail_read;
}
key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
if (!key) {
rc = -ENOKEY;
goto fail_read;
}
read_unlock_bh(&sec->lock);
write_lock_bh(&sec->lock);
frame_ctr = be32_to_cpu(sec->params.frame_counter);
hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
if (frame_ctr == 0xFFFFFFFF) {
write_unlock_bh(&sec->lock);
llsec_key_put(key);
rc = -EOVERFLOW;
goto fail;
}
sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);
write_unlock_bh(&sec->lock);
rcu_read_unlock();
skb->mac_len = ieee802154_hdr_push(skb, &hdr);
skb_reset_mac_header(skb);
rc = llsec_do_encrypt(skb, sec, &hdr, key);
llsec_key_put(key);
return rc;
fail_read:
read_unlock_bh(&sec->lock);
fail:
rcu_read_unlock();
return rc;
}
static struct mac802154_llsec_device*
llsec_lookup_dev(struct mac802154_llsec *sec,
const struct ieee802154_addr *addr)
{
struct ieee802154_addr devaddr = *addr;
struct mac802154_llsec_device *dev = NULL;
if (devaddr.mode == IEEE802154_ADDR_NONE &&
llsec_recover_addr(sec, &devaddr) < 0)
return NULL;
if (devaddr.mode == IEEE802154_ADDR_SHORT) {
u32 key = llsec_dev_hash_short(devaddr.short_addr,
devaddr.pan_id);
hash_for_each_possible_rcu(sec->devices_short, dev,
bucket_s, key) {
if (dev->dev.pan_id == devaddr.pan_id &&
dev->dev.short_addr == devaddr.short_addr)
return dev;
}
} else {
u64 key = llsec_dev_hash_long(devaddr.extended_addr);
hash_for_each_possible_rcu(sec->devices_hw, dev,
bucket_hw, key) {
if (dev->dev.hwaddr == devaddr.extended_addr)
return dev;
}
}
return NULL;
}
static int
llsec_lookup_seclevel(const struct mac802154_llsec *sec,
u8 frame_type, u8 cmd_frame_id,
struct ieee802154_llsec_seclevel *rlevel)
{
struct ieee802154_llsec_seclevel *level;
list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
if (level->frame_type == frame_type &&
(frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
level->cmd_frame_id == cmd_frame_id)) {
*rlevel = *level;
return 0;
}
}
return -EINVAL;
}
static int
llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key, __le64 dev_addr)
{
u8 iv[16];
unsigned char *data;
int datalen;
struct scatterlist src;
struct blkcipher_desc req = {
.tfm = key->tfm0,
.info = iv,
.flags = 0,
};
llsec_geniv(iv, dev_addr, &hdr->sec);
data = skb_mac_header(skb) + skb->mac_len;
datalen = skb_tail_pointer(skb) - data;
sg_init_one(&src, data, datalen);
return crypto_blkcipher_decrypt_iv(&req, &src, &src, datalen);
}
static int
llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key, __le64 dev_addr)
{
u8 iv[16];
unsigned char *data;
int authlen, datalen, assoclen, rc;
struct scatterlist src, assoc[2];
struct aead_request *req;
authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
llsec_geniv(iv, dev_addr, &hdr->sec);
req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
if (!req)
return -ENOMEM;
sg_init_table(assoc, 2);
sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
assoclen = skb->mac_len;
data = skb_mac_header(skb) + skb->mac_len;
datalen = skb_tail_pointer(skb) - data;
if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
sg_set_buf(&assoc[1], data, 0);
} else {
sg_set_buf(&assoc[1], data, datalen - authlen);
assoclen += datalen - authlen;
data += datalen - authlen;
datalen = authlen;
}
sg_init_one(&src, data, datalen);
aead_request_set_callback(req, 0, NULL, NULL);
aead_request_set_assoc(req, assoc, assoclen);
aead_request_set_crypt(req, &src, &src, datalen, iv);
rc = crypto_aead_decrypt(req);
kfree(req);
skb_trim(skb, skb->len - authlen);
return rc;
}
static int
llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
const struct ieee802154_hdr *hdr,
struct mac802154_llsec_key *key, __le64 dev_addr)
{
if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
else
return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
}
static int
llsec_update_devkey_info(struct mac802154_llsec_device *dev,
const struct ieee802154_llsec_key_id *in_key,
u32 frame_counter)
{
struct mac802154_llsec_device_key *devkey = NULL;
if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
devkey = llsec_devkey_find(dev, in_key);
if (!devkey)
return -ENOENT;
}
spin_lock_bh(&dev->lock);
if ((!devkey && frame_counter < dev->dev.frame_counter) ||
(devkey && frame_counter < devkey->devkey.frame_counter)) {
spin_unlock_bh(&dev->lock);
return -EINVAL;
}
if (devkey)
devkey->devkey.frame_counter = frame_counter + 1;
else
dev->dev.frame_counter = frame_counter + 1;
spin_unlock_bh(&dev->lock);
return 0;
}
int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
{
struct ieee802154_hdr hdr;
struct mac802154_llsec_key *key;
struct ieee802154_llsec_key_id key_id;
struct mac802154_llsec_device *dev;
struct ieee802154_llsec_seclevel seclevel;
int err;
__le64 dev_addr;
u32 frame_ctr;
if (ieee802154_hdr_peek(skb, &hdr) < 0)
return -EINVAL;
if (!hdr.fc.security_enabled)
return 0;
if (hdr.fc.version == 0)
return -EINVAL;
read_lock_bh(&sec->lock);
if (!sec->params.enabled) {
read_unlock_bh(&sec->lock);
return -EINVAL;
}
read_unlock_bh(&sec->lock);
rcu_read_lock();
key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
if (!key) {
err = -ENOKEY;
goto fail;
}
dev = llsec_lookup_dev(sec, &hdr.source);
if (!dev) {
err = -EINVAL;
goto fail_dev;
}
if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
err = -EINVAL;
goto fail_dev;
}
if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
(hdr.sec.level == 0 && seclevel.device_override &&
!dev->dev.seclevel_exempt)) {
err = -EINVAL;
goto fail_dev;
}
frame_ctr = le32_to_cpu(hdr.sec.frame_counter);
if (frame_ctr == 0xffffffff) {
err = -EOVERFLOW;
goto fail_dev;
}
err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
if (err)
goto fail_dev;
dev_addr = dev->dev.hwaddr;
rcu_read_unlock();
err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
llsec_key_put(key);
return err;
fail_dev:
llsec_key_put(key);
fail:
rcu_read_unlock();
return err;
}
| nsat/zynq-linux | net/mac802154/llsec.c | C | gpl-2.0 | 24,885 |
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* $Id: getinfo.c,v 1.1 2006/07/05 22:41:22 victor Exp $
***************************************************************************/
#include "setup.h"
#include <curl/curl.h>
#include "urldata.h"
#include "getinfo.h"
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include "memory.h"
#include "sslgen.h"
/* Make this the last #include */
#include "memdebug.h"
/*
* This is supposed to be called in the beginning of a perform() session
* and should reset all session-info variables
*/
CURLcode Curl_initinfo(struct SessionHandle *data)
{
struct Progress *pro = &data->progress;
struct PureInfo *info =&data->info;
pro->t_nslookup = 0;
pro->t_connect = 0;
pro->t_pretransfer = 0;
pro->t_starttransfer = 0;
pro->timespent = 0;
pro->t_redirect = 0;
info->httpcode = 0;
info->httpversion=0;
info->filetime=-1; /* -1 is an illegal time and thus means unknown */
if (info->contenttype)
free(info->contenttype);
info->contenttype = NULL;
info->header_size = 0;
info->request_size = 0;
info->numconnects = 0;
return CURLE_OK;
}
CURLcode Curl_getinfo(struct SessionHandle *data, CURLINFO info, ...)
{
va_list arg;
long *param_longp=NULL;
double *param_doublep=NULL;
char **param_charp=NULL;
struct curl_slist **param_slistp=NULL;
va_start(arg, info);
switch(info&CURLINFO_TYPEMASK) {
default:
return CURLE_BAD_FUNCTION_ARGUMENT;
case CURLINFO_STRING:
param_charp = va_arg(arg, char **);
if(NULL == param_charp)
return CURLE_BAD_FUNCTION_ARGUMENT;
break;
case CURLINFO_LONG:
param_longp = va_arg(arg, long *);
if(NULL == param_longp)
return CURLE_BAD_FUNCTION_ARGUMENT;
break;
case CURLINFO_DOUBLE:
param_doublep = va_arg(arg, double *);
if(NULL == param_doublep)
return CURLE_BAD_FUNCTION_ARGUMENT;
break;
case CURLINFO_SLIST:
param_slistp = va_arg(arg, struct curl_slist **);
if(NULL == param_slistp)
return CURLE_BAD_FUNCTION_ARGUMENT;
break;
}
switch(info) {
case CURLINFO_EFFECTIVE_URL:
*param_charp = data->change.url?data->change.url:(char *)"";
break;
case CURLINFO_RESPONSE_CODE:
*param_longp = data->info.httpcode;
break;
case CURLINFO_HTTP_CONNECTCODE:
*param_longp = data->info.httpproxycode;
break;
case CURLINFO_FILETIME:
*param_longp = data->info.filetime;
break;
case CURLINFO_HEADER_SIZE:
*param_longp = data->info.header_size;
break;
case CURLINFO_REQUEST_SIZE:
*param_longp = data->info.request_size;
break;
case CURLINFO_TOTAL_TIME:
*param_doublep = data->progress.timespent;
break;
case CURLINFO_NAMELOOKUP_TIME:
*param_doublep = data->progress.t_nslookup;
break;
case CURLINFO_CONNECT_TIME:
*param_doublep = data->progress.t_connect;
break;
case CURLINFO_PRETRANSFER_TIME:
*param_doublep = data->progress.t_pretransfer;
break;
case CURLINFO_STARTTRANSFER_TIME:
*param_doublep = data->progress.t_starttransfer;
break;
case CURLINFO_SIZE_UPLOAD:
*param_doublep = (double)data->progress.uploaded;
break;
case CURLINFO_SIZE_DOWNLOAD:
*param_doublep = (double)data->progress.downloaded;
break;
case CURLINFO_SPEED_DOWNLOAD:
*param_doublep = (double)data->progress.dlspeed;
break;
case CURLINFO_SPEED_UPLOAD:
*param_doublep = (double)data->progress.ulspeed;
break;
case CURLINFO_SSL_VERIFYRESULT:
*param_longp = data->set.ssl.certverifyresult;
break;
case CURLINFO_CONTENT_LENGTH_DOWNLOAD:
*param_doublep = (double)data->progress.size_dl;
break;
case CURLINFO_CONTENT_LENGTH_UPLOAD:
*param_doublep = (double)data->progress.size_ul;
break;
case CURLINFO_REDIRECT_TIME:
*param_doublep = data->progress.t_redirect;
break;
case CURLINFO_REDIRECT_COUNT:
*param_longp = data->set.followlocation;
break;
case CURLINFO_CONTENT_TYPE:
*param_charp = data->info.contenttype;
break;
case CURLINFO_PRIVATE:
*param_charp = data->set.private_data;
break;
case CURLINFO_HTTPAUTH_AVAIL:
*param_longp = data->info.httpauthavail;
break;
case CURLINFO_PROXYAUTH_AVAIL:
*param_longp = data->info.proxyauthavail;
break;
case CURLINFO_OS_ERRNO:
*param_longp = data->state.os_errno;
break;
case CURLINFO_NUM_CONNECTS:
*param_longp = data->info.numconnects;
break;
case CURLINFO_SSL_ENGINES:
*param_slistp = Curl_ssl_engines_list(data);
break;
case CURLINFO_COOKIELIST:
*param_slistp = Curl_cookie_list(data);
break;
case CURLINFO_LASTSOCKET:
if((data->state.lastconnect != -1) &&
(data->state.connects[data->state.lastconnect] != NULL))
*param_longp = data->state.connects[data->state.lastconnect]->
sock[FIRSTSOCKET];
else
*param_longp = -1;
break;
default:
return CURLE_BAD_FUNCTION_ARGUMENT;
}
return CURLE_OK;
}
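/* Illustrative usage sketch (not part of the original file): Curl_getinfo()
 * above backs the public curl_easy_getinfo() entry point, and the
 * CURLINFO_TYPEMASK dispatch means the caller has to pass a pointer that
 * matches the info's type class (long, double, char * or curl_slist *).
 * The snippet below only assumes the documented public libcurl API, nothing
 * private to this file, and is deliberately kept out of the build.
 */
#if 0 /* example only */
#include <stdio.h>
#include <curl/curl.h>

static void print_response_info(CURL *handle)
{
  long code = 0;
  double total = 0.0;
  char *ctype = NULL;

  /* CURLINFO_RESPONSE_CODE is a CURLINFO_LONG, so a long * is required */
  if(curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &code) == CURLE_OK)
    printf("HTTP code: %ld\n", code);

  /* CURLINFO_TOTAL_TIME is a CURLINFO_DOUBLE */
  if(curl_easy_getinfo(handle, CURLINFO_TOTAL_TIME, &total) == CURLE_OK)
    printf("total time: %.3f s\n", total);

  /* CURLINFO_CONTENT_TYPE is a CURLINFO_STRING; the pointer stays owned by
     libcurl and may be NULL */
  if(curl_easy_getinfo(handle, CURLINFO_CONTENT_TYPE, &ctype) == CURLE_OK &&
     ctype)
    printf("content type: %s\n", ctype);
}
#endif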
| pfchrono/mudmagic-client | bundled/curl/getinfo.c | C | gpl-2.0 | 5,871 |
/* nip2-cli.c ... run the nip2 executable, connecting stdin and stdout to the
* console
*
* 11/12/09
* - use SetHandleInformation() to stop the child inheriting the read
* handle (thanks Leo)
*/
/*
Copyright (C) 2008 Imperial College, London
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
These files are distributed with VIPS - http://www.vips.ecs.soton.ac.uk
*/
/* Adapted from sample code by Leo Davidson, with the author's permission.
*/
/* Windows does not let a single exe run in both command-line and GUI mode. To
* run nip2 in command-line mode, we run this CLI wrapper program instead,
* which starts the main nip2 exe, connecting stdin/out/err appropriately.
*/
#include <windows.h>
#include <stdio.h>
#include <io.h>
#include <ctype.h>
#include <glib.h>
void
print_last_error ()
{
char *buf;
if (FormatMessageA (FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_IGNORE_INSERTS |
FORMAT_MESSAGE_FROM_SYSTEM,
NULL,
GetLastError (),
MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPSTR) & buf, 0, NULL))
{
fprintf (stderr, "%s", buf);
LocalFree (buf);
}
}
int
main (int argc, char **argv)
{
char *dirname;
char command[2048];
gboolean quote;
int i, j;
HANDLE hChildStdoutRd;
HANDLE hChildStdoutWr;
SECURITY_ATTRIBUTES saAttr;
PROCESS_INFORMATION processInformation;
STARTUPINFO startUpInfo;
DWORD dwRead;
CHAR buf[1024];
/* we run the nip2.exe in the same directory as this exe: swap the last
* pathname component for nip2.exe
* we change the argv[0] pointer, probably not a good idea
*/
dirname = g_path_get_dirname (argv[0]);
argv[0] = g_build_filename (dirname, "nip2.exe", NULL);
g_free (dirname);
if (_access (argv[0], 00))
{
fprintf (stderr, "cannot access \"%s\"\n", argv[0]);
exit (1);
}
/* build the command string ... we have to quote items containing spaces
*/
command[0] = '\0';
for (i = 0; i < argc; i++)
{
quote = FALSE;
for (j = 0; argv[i][j]; j++)
{
if (isspace (argv[i][j]))
{
quote = TRUE;
break;
}
}
      /* strncat()'s size argument is the space left in the buffer, not the
       * total buffer size, so recompute the remaining room for each append.
       */
      if (i > 0)
        {
          strncat (command, " ", sizeof (command) - strlen (command) - 1);
        }
      if (quote)
        {
          strncat (command, "\"", sizeof (command) - strlen (command) - 1);
        }
      strncat (command, argv[i], sizeof (command) - strlen (command) - 1);
      if (quote)
        {
          strncat (command, "\"", sizeof (command) - strlen (command) - 1);
        }
}
}
if (strlen (command) == sizeof (command) - 1)
{
fprintf (stderr, "command too long\n");
exit (1);
}
/* Create a pipe for the child process's STDOUT.
*/
hChildStdoutRd = NULL;
hChildStdoutWr = NULL;
saAttr.nLength = sizeof (SECURITY_ATTRIBUTES);
saAttr.bInheritHandle = TRUE;
saAttr.lpSecurityDescriptor = NULL;
if (!CreatePipe (&hChildStdoutRd, &hChildStdoutWr, &saAttr, 0))
{
fprintf (stderr, "CreatePipe failed: ");
print_last_error ();
fprintf (stderr, "\n");
exit (1);
}
/* Ensure the read handle to the pipe for STDOUT is not inherited.
*/
if (!SetHandleInformation(hChildStdoutRd, HANDLE_FLAG_INHERIT, 0))
{
fprintf (stderr, "SetHandleInformation failed: ");
print_last_error ();
fprintf (stderr, "\n");
exit (1);
}
/* Run command.
*/
startUpInfo.cb = sizeof (STARTUPINFO);
startUpInfo.lpReserved = NULL;
startUpInfo.lpDesktop = NULL;
startUpInfo.lpTitle = NULL;
startUpInfo.dwFlags = STARTF_USESHOWWINDOW | STARTF_USESTDHANDLES;
  /* STARTF_USESTDHANDLES expects all three handles to be set; pass our own
   * stdin straight through to the child.
   */
  startUpInfo.hStdInput = GetStdHandle (STD_INPUT_HANDLE);
  startUpInfo.hStdOutput = hChildStdoutWr;
  startUpInfo.hStdError = hChildStdoutWr;
startUpInfo.cbReserved2 = 0;
startUpInfo.lpReserved2 = NULL;
startUpInfo.wShowWindow = SW_SHOWNORMAL;
if (!CreateProcess (NULL, command, NULL, /* default security */
NULL, /* default thread security */
TRUE, /* inherit handles */
CREATE_DEFAULT_ERROR_MODE | DETACHED_PROCESS, NULL, /* use default environment */
NULL, /* set default directory */
&startUpInfo, &processInformation))
{
fprintf (stderr, "error running \"%s\": ", command);
print_last_error ();
fprintf (stderr, "\n");
exit (1);
}
/* Close the write end of the pipe before reading from the read end.
*/
CloseHandle (hChildStdoutWr);
while (ReadFile (hChildStdoutRd, buf, sizeof (buf) - 1, &dwRead, NULL) &&
dwRead > 0)
{
buf[dwRead] = '\0';
printf ("%s", buf);
}
CloseHandle (hChildStdoutRd);
return (0);
}
| jcupitt/nip2 | src/nip2-cli.c | C | gpl-2.0 | 5,159 |
/* utility routines for keeping some statistics */
/* (C) 2009 by Harald Welte <laforge@gnumonks.org>
*
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <sys/types.h>
#include <osmocore/linuxlist.h>
#include <osmocore/talloc.h>
#include <osmocore/statistics.h>
static LLIST_HEAD(counters);
void *tall_ctr_ctx;
struct counter *counter_alloc(const char *name)
{
struct counter *ctr = talloc_zero(tall_ctr_ctx, struct counter);
if (!ctr)
return NULL;
ctr->name = name;
llist_add_tail(&ctr->list, &counters);
return ctr;
}
void counter_free(struct counter *ctr)
{
llist_del(&ctr->list);
talloc_free(ctr);
}
int counters_for_each(int (*handle_counter)(struct counter *, void *), void *data)
{
struct counter *ctr;
int rc = 0;
llist_for_each_entry(ctr, &counters, list) {
rc = handle_counter(ctr, data);
if (rc < 0)
return rc;
}
return rc;
}
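/* Illustrative usage sketch (not part of the original file): a counter is
 * allocated once at start-up, bumped from the code path being measured and
 * later reported by walking the global list with counters_for_each().
 * Only counter_alloc(), counter_free() and counters_for_each() are defined
 * here; counter_inc() and the ->value field are assumed to come from
 * <osmocore/statistics.h>.
 */
#if 0 /* example only */
#include <stdio.h>

static struct counter *paging_attempts;

static int dump_one_counter(struct counter *ctr, void *data)
{
	printf("%s: %lu\n", ctr->name, ctr->value);
	return 0;
}

static void stats_example(void)
{
	paging_attempts = counter_alloc("paging.attempts");
	if (!paging_attempts)
		return;

	counter_inc(paging_attempts);	/* in the measured code path */

	counters_for_each(dump_one_counter, NULL);
}
#endif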
| techniker/libosmocore | src/statistics.c | C | gpl-2.0 | 1,583 |
/*
* fs/f2fs/recovery.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
* Roll forward recovery scenarios.
*
* [Term] F: fsync_mark, D: dentry_mark
*
* 1. inode(x) | CP | inode(x) | dnode(F)
* -> Update the latest inode(x).
*
* 2. inode(x) | CP | inode(F) | dnode(F)
* -> No problem.
*
* 3. inode(x) | CP | dnode(F) | inode(x)
* -> Recover to the latest dnode(F), and drop the last inode(x)
*
* 4. inode(x) | CP | dnode(F) | inode(F)
* -> No problem.
*
* 5. CP | inode(x) | dnode(F)
* -> The inode(DF) was missing. Should drop this dnode(F).
*
* 6. CP | inode(DF) | dnode(F)
* -> No problem.
*
* 7. CP | dnode(F) | inode(DF)
* -> If f2fs_iget fails, then goto next to find inode(DF).
*
* 8. CP | dnode(F) | inode(x)
* -> If f2fs_iget fails, then goto next to find inode(DF).
* But it will fail due to no inode(DF).
*/
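/* Illustrative sketch (not part of the original file): the scenarios above
 * boil down to three predicates used throughout this file -- IS_INODE()
 * (the node page carries an inode), is_fsync_dnode() (F mark) and
 * is_dent_dnode() (D mark).  Roll-forward walks the warm node log written
 * after the last checkpoint, remembers inodes whose node pages carry the F
 * mark, and additionally re-creates the directory entry when the D mark is
 * present.  The hypothetical classifier below is for explanation only and
 * is not compiled in.
 */
#if 0
enum fsync_node_kind {
	NODE_PLAIN,		/* no F mark: irrelevant for roll-forward */
	NODE_FSYNC,		/* dnode(F): data blocks to roll forward */
	NODE_FSYNC_DENTRY,	/* inode(DF): also recover the dentry */
};

static enum fsync_node_kind classify_node(struct page *page)
{
	if (!is_fsync_dnode(page))
		return NODE_PLAIN;
	if (IS_INODE(page) && is_dent_dnode(page))
		return NODE_FSYNC_DENTRY;
	return NODE_FSYNC;
}
#endif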
static struct kmem_cache *fsync_entry_slab;
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
return false;
return true;
}
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
nid_t ino)
{
struct fsync_inode_entry *entry;
list_for_each_entry(entry, head, list)
if (entry->inode->i_ino == ino)
return entry;
return NULL;
}
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
struct inode *inode)
{
struct fsync_inode_entry *entry;
entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
if (!entry)
return NULL;
entry->inode = inode;
list_add_tail(&entry->list, head);
return entry;
}
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
iput(entry->inode);
list_del(&entry->list);
kmem_cache_free(fsync_entry_slab, entry);
}
static int recover_dentry(struct inode *inode, struct page *ipage,
struct list_head *dir_list)
{
struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
struct fscrypt_name fname;
struct page *page;
struct inode *dir, *einode;
struct fsync_inode_entry *entry;
int err = 0;
char *name;
entry = get_fsync_inode(dir_list, pino);
if (!entry) {
dir = f2fs_iget(inode->i_sb, pino);
if (IS_ERR(dir)) {
err = PTR_ERR(dir);
goto out;
}
entry = add_fsync_inode(dir_list, dir);
if (!entry) {
err = -ENOMEM;
iput(dir);
goto out;
}
}
dir = entry->inode;
memset(&fname, 0, sizeof(struct fscrypt_name));
fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
fname.disk_name.name = raw_inode->i_name;
if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
WARN_ON(1);
err = -ENAMETOOLONG;
goto out;
}
retry:
de = __f2fs_find_entry(dir, &fname, &page);
if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_unmap_put;
if (de) {
einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) {
WARN_ON(1);
err = PTR_ERR(einode);
if (err == -ENOENT)
err = -EEXIST;
goto out_unmap_put;
}
err = acquire_orphan_inode(F2FS_I_SB(inode));
if (err) {
iput(einode);
goto out_unmap_put;
}
f2fs_delete_entry(de, page, dir, einode);
iput(einode);
goto retry;
} else if (IS_ERR(page)) {
err = PTR_ERR(page);
} else {
err = __f2fs_do_add_link(dir, &fname, inode,
inode->i_ino, inode->i_mode);
}
goto out;
out_unmap_put:
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
out:
if (file_enc_name(inode))
name = "<encrypted>";
else
name = raw_inode->i_name;
f2fs_msg(inode->i_sb, KERN_NOTICE,
"%s: ino = %x, name = %s, dir = %lx, err = %d",
__func__, ino_of_node(ipage), name,
IS_ERR(dir) ? 0 : dir->i_ino, err);
return err;
}
static void recover_inode(struct inode *inode, struct page *page)
{
struct f2fs_inode *raw = F2FS_INODE(page);
char *name;
inode->i_mode = le16_to_cpu(raw->i_mode);
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
if (file_enc_name(inode))
name = "<encrypted>";
else
name = F2FS_INODE(page)->i_name;
f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
ino_of_node(page), name);
}
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
struct f2fs_inode *ri = F2FS_INODE(ipage);
struct timespec disk;
if (!IS_INODE(ipage))
return true;
disk.tv_sec = le64_to_cpu(ri->i_ctime);
disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
if (timespec_compare(&inode->i_ctime, &disk) > 0)
return false;
disk.tv_sec = le64_to_cpu(ri->i_atime);
disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
if (timespec_compare(&inode->i_atime, &disk) > 0)
return false;
disk.tv_sec = le64_to_cpu(ri->i_mtime);
disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
if (timespec_compare(&inode->i_mtime, &disk) > 0)
return false;
return true;
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct inode *inode;
struct page *page = NULL;
block_t blkaddr;
int err = 0;
/* get node pages in the current segment */
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
while (1) {
struct fsync_inode_entry *entry;
if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
page = get_tmp_page(sbi, blkaddr);
if (cp_ver != cpver_of_node(page))
break;
if (!is_fsync_dnode(page))
goto next;
entry = get_fsync_inode(head, ino_of_node(page));
if (entry) {
if (!is_same_inode(entry->inode, page))
goto next;
} else {
if (IS_INODE(page) && is_dent_dnode(page)) {
err = recover_inode_page(sbi, page);
if (err)
break;
}
/*
* CP | dnode(F) | inode(DF)
* For this case, we should not give up now.
*/
inode = f2fs_iget(sbi->sb, ino_of_node(page));
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
if (err == -ENOENT) {
err = 0;
goto next;
}
break;
}
/* add this fsync inode to the list */
entry = add_fsync_inode(head, inode);
if (!entry) {
err = -ENOMEM;
iput(inode);
break;
}
}
entry->blkaddr = blkaddr;
if (IS_INODE(page) && is_dent_dnode(page))
entry->last_dentry = blkaddr;
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
f2fs_put_page(page, 1);
ra_meta_pages_cond(sbi, blkaddr);
}
f2fs_put_page(page, 1);
return err;
}
static void destroy_fsync_dnodes(struct list_head *head)
{
struct fsync_inode_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, head, list)
del_fsync_inode(entry);
}
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
block_t blkaddr, struct dnode_of_data *dn)
{
struct seg_entry *sentry;
unsigned int segno = GET_SEGNO(sbi, blkaddr);
unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
struct f2fs_summary_block *sum_node;
struct f2fs_summary sum;
struct page *sum_page, *node_page;
struct dnode_of_data tdn = *dn;
nid_t ino, nid;
struct inode *inode;
unsigned int offset;
block_t bidx;
int i;
sentry = get_seg_entry(sbi, segno);
if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
return 0;
/* Get the previous summary */
for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
if (curseg->segno == segno) {
sum = curseg->sum_blk->entries[blkoff];
goto got_it;
}
}
sum_page = get_sum_page(sbi, segno);
sum_node = (struct f2fs_summary_block *)page_address(sum_page);
sum = sum_node->entries[blkoff];
f2fs_put_page(sum_page, 1);
got_it:
/* Use the locked dnode page and inode */
nid = le32_to_cpu(sum.nid);
if (dn->inode->i_ino == nid) {
tdn.nid = nid;
if (!dn->inode_page_locked)
lock_page(dn->inode_page);
tdn.node_page = dn->inode_page;
tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
goto truncate_out;
} else if (dn->nid == nid) {
tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
goto truncate_out;
}
/* Get the node page */
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
return PTR_ERR(node_page);
offset = ofs_of_node(node_page);
ino = ino_of_node(node_page);
f2fs_put_page(node_page, 1);
if (ino != dn->inode->i_ino) {
/* Deallocate previous index in the node page */
inode = f2fs_iget(sbi->sb, ino);
if (IS_ERR(inode))
return PTR_ERR(inode);
} else {
inode = dn->inode;
}
bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
/*
* if inode page is locked, unlock temporarily, but its reference
* count keeps alive.
*/
if (ino == dn->inode->i_ino && dn->inode_page_locked)
unlock_page(dn->inode_page);
set_new_dnode(&tdn, inode, NULL, NULL, 0);
if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
goto out;
if (tdn.data_blkaddr == blkaddr)
truncate_data_blocks_range(&tdn, 1);
f2fs_put_dnode(&tdn);
out:
if (ino != dn->inode->i_ino)
iput(inode);
else if (dn->inode_page_locked)
lock_page(dn->inode_page);
return 0;
truncate_out:
if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
truncate_data_blocks_range(&tdn, 1);
if (dn->inode->i_ino == nid && !dn->inode_page_locked)
unlock_page(dn->inode_page);
return 0;
}
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page, block_t blkaddr)
{
struct dnode_of_data dn;
struct node_info ni;
unsigned int start, end;
int err = 0, recovered = 0;
/* step 1: recover xattr */
if (IS_INODE(page)) {
recover_inline_xattr(inode, page);
} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
/*
* Deprecated; xattr blocks should be found from cold log.
* But, we should remain this for backward compatibility.
*/
recover_xattr_data(inode, page, blkaddr);
goto out;
}
/* step 2: recover inline data */
if (recover_inline_data(inode, page))
goto out;
/* step 3: recover data indices */
start = start_bidx_of_node(ofs_of_node(page), inode);
end = start + ADDRS_PER_PAGE(page, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, start, ALLOC_NODE);
if (err)
goto out;
f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
get_node_info(sbi, dn.nid, &ni);
f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
for (; start < end; start++, dn.ofs_in_node++) {
block_t src, dest;
src = datablock_addr(dn.node_page, dn.ofs_in_node);
dest = datablock_addr(page, dn.ofs_in_node);
/* skip recovering if dest is the same as src */
if (src == dest)
continue;
/* dest is invalid, just invalidate src block */
if (dest == NULL_ADDR) {
truncate_data_blocks_range(&dn, 1);
continue;
}
		/* cast before shifting to avoid 32-bit overflow on large files */
		if (((loff_t)(start + 1)) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode,
					((loff_t)(start + 1)) << PAGE_SHIFT);
/*
* dest is reserved block, invalidate src block
* and then reserve one new block in dnode page.
*/
if (dest == NEW_ADDR) {
truncate_data_blocks_range(&dn, 1);
reserve_new_block(&dn);
continue;
}
/* dest is valid block, try to recover from src to dest */
if (is_valid_blkaddr(sbi, dest, META_POR)) {
if (src == NULL_ADDR) {
err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
while (err)
err = reserve_new_block(&dn);
#endif
/* We should not get -ENOSPC */
f2fs_bug_on(sbi, err);
if (err)
goto err;
}
/* Check the previous node page having this index */
err = check_index_in_prev_nodes(sbi, dest, &dn);
if (err)
goto err;
/* write dummy data page */
f2fs_replace_block(sbi, &dn, src, dest,
ni.version, false, false);
recovered++;
}
}
copy_node_footer(dn.node_page, page);
fill_node_footer(dn.node_page, dn.nid, ni.ino,
ofs_of_node(page), false);
set_page_dirty(dn.node_page);
err:
f2fs_put_dnode(&dn);
out:
f2fs_msg(sbi->sb, KERN_NOTICE,
"recover_data: ino = %lx, recovered = %d blocks, err = %d",
inode->i_ino, recovered, err);
return err;
}
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
struct list_head *dir_list)
{
unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
int err = 0;
block_t blkaddr;
/* get node pages in the current segment */
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
while (1) {
struct fsync_inode_entry *entry;
if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
break;
ra_meta_pages_cond(sbi, blkaddr);
page = get_tmp_page(sbi, blkaddr);
if (cp_ver != cpver_of_node(page)) {
f2fs_put_page(page, 1);
break;
}
entry = get_fsync_inode(inode_list, ino_of_node(page));
if (!entry)
goto next;
/*
* inode(x) | CP | inode(x) | dnode(F)
* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
if (IS_INODE(page))
recover_inode(entry->inode, page);
if (entry->last_dentry == blkaddr) {
err = recover_dentry(entry->inode, page, dir_list);
if (err) {
f2fs_put_page(page, 1);
break;
}
}
err = do_recover_data(sbi, entry->inode, page, blkaddr);
if (err) {
f2fs_put_page(page, 1);
break;
}
if (entry->blkaddr == blkaddr)
del_fsync_inode(entry);
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
f2fs_put_page(page, 1);
}
if (!err)
allocate_new_segments(sbi);
return err;
}
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct list_head inode_list;
struct list_head dir_list;
block_t blkaddr;
int err;
int ret = 0;
bool need_writecp = false;
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
sizeof(struct fsync_inode_entry));
if (!fsync_entry_slab)
return -ENOMEM;
INIT_LIST_HEAD(&inode_list);
INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */
mutex_lock(&sbi->cp_mutex);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
/* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list);
if (err || list_empty(&inode_list))
goto out;
if (check_only) {
ret = 1;
goto out;
}
need_writecp = true;
/* step #2: recover data */
err = recover_data(sbi, &inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
destroy_fsync_dnodes(&inode_list);
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
truncate_inode_pages_final(META_MAPPING(sbi));
}
clear_sbi_flag(sbi, SBI_POR_DOING);
if (err) {
bool invalidate = false;
if (test_opt(sbi, LFS)) {
update_meta_page(sbi, NULL, blkaddr);
invalidate = true;
} else if (discard_next_dnode(sbi, blkaddr)) {
invalidate = true;
}
f2fs_wait_all_discard_bio(sbi);
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META))
sync_meta_pages(sbi, META, LONG_MAX);
/* invalidate temporary meta page */
if (invalidate)
invalidate_mapping_pages(META_MAPPING(sbi),
blkaddr, blkaddr);
set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
mutex_unlock(&sbi->cp_mutex);
} else if (need_writecp) {
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
mutex_unlock(&sbi->cp_mutex);
err = write_checkpoint(sbi, &cpc);
} else {
mutex_unlock(&sbi->cp_mutex);
}
destroy_fsync_dnodes(&dir_list);
kmem_cache_destroy(fsync_entry_slab);
return ret ? ret: err;
}
| flaming-toast/linux-jeyu | fs/f2fs/recovery.c | C | gpl-2.0 | 16,232 |
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include "config.h"
#include "mp_msg.h"
#include "help_mp.h"
#ifdef __FreeBSD__
#include <sys/cdrio.h>
#endif
#include "m_option.h"
#include "stream.h"
#include "libmpdemux/demuxer.h"
/// We keep these 2 for the gui atm, but they will be removed.
char* cdrom_device=NULL;
int dvd_chapter=1;
int dvd_last_chapter=0;
char* dvd_device=NULL;
char *bluray_device=NULL;
// Open a new stream (stdin/file/vcd/url)
stream_t* open_stream(const char* filename,char** options, int* file_format){
int dummy = DEMUXER_TYPE_UNKNOWN;
if (!file_format) file_format = &dummy;
// Check if playlist or unknown
if (*file_format != DEMUXER_TYPE_PLAYLIST){
*file_format=DEMUXER_TYPE_UNKNOWN;
}
if(!filename) {
mp_msg(MSGT_OPEN,MSGL_ERR,"NULL filename, report this bug\n");
return NULL;
}
//============ Open STDIN or plain FILE ============
return open_stream_full(filename,STREAM_READ,options,file_format);
}
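/* Illustrative usage sketch (not part of the original file): callers pass a
 * filename/URL, optional per-stream options and a demuxer-type hint, and get
 * back a stream_t * or NULL on failure.  free_stream() is assumed to be
 * declared in stream.h; detailed error reporting happens inside the stream
 * layer itself.
 */
#if 0 /* example only */
static void open_example(void){
  int file_format = DEMUXER_TYPE_UNKNOWN;
  stream_t* s = open_stream("movie.avi", NULL, &file_format);
  if(!s){
    mp_msg(MSGT_OPEN, MSGL_ERR, "could not open stream\n");
    return;
  }
  /* ... hand the stream to the demuxer layer ... */
  free_stream(s);
}
#endif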
| svn2github/MPlayer-SB | stream/open.c | C | gpl-2.0 | 1,868 |
/*
* Copyright 2008-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
#include "jni.h"
#include "jni_util.h"
#include "jvm.h"
#include "jlong.h"
#include <dlfcn.h>
#include <errno.h>
#include <sys/acl.h>
#include "sun_nio_fs_SolarisNativeDispatcher.h"
static void throwUnixException(JNIEnv* env, int errnum) {
jobject x = JNU_NewObjectByName(env, "sun/nio/fs/UnixException",
"(I)V", errnum);
if (x != NULL) {
(*env)->Throw(env, x);
}
}
JNIEXPORT void JNICALL
Java_sun_nio_fs_SolarisNativeDispatcher_init(JNIEnv *env, jclass clazz) {
}
JNIEXPORT jint JNICALL
Java_sun_nio_fs_SolarisNativeDispatcher_facl(JNIEnv* env, jclass this, jint fd,
jint cmd, jint nentries, jlong address)
{
void* aclbufp = jlong_to_ptr(address);
int n = -1;
n = facl((int)fd, (int)cmd, (int)nentries, aclbufp);
if (n == -1) {
throwUnixException(env, errno);
}
return (jint)n;
}
| TheTypoMaster/Scaper | openjdk/jdk/src/solaris/native/sun/nio/fs/SolarisNativeDispatcher.c | C | gpl-2.0 | 2,058 |
/*
* SSLv3/TLSv1 server-side functions
*
* Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of mbed TLS (https://tls.mbed.org)
*/
#if !defined(MBEDTLS_CONFIG_FILE)
#include "mbedtls/config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif
#if defined(MBEDTLS_SSL_SRV_C)
#if defined(MBEDTLS_PLATFORM_C)
#include "mbedtls/platform.h"
#else
#include <stdlib.h>
#define mbedtls_calloc calloc
#define mbedtls_free free
#endif
#include "mbedtls/ssl.h"
#include "mbedtls/ssl_internal.h"
#include "mbedtls/debug.h"
#include "mbedtls/error.h"
#include "mbedtls/platform_util.h"
#include <string.h>
#if defined(MBEDTLS_ECP_C)
#include "mbedtls/ecp.h"
#endif
#if defined(MBEDTLS_HAVE_TIME)
#include "mbedtls/platform_time.h"
#endif
#if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY)
int mbedtls_ssl_set_client_transport_id( mbedtls_ssl_context *ssl,
const unsigned char *info,
size_t ilen )
{
if( ssl->conf->endpoint != MBEDTLS_SSL_IS_SERVER )
return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
mbedtls_free( ssl->cli_id );
if( ( ssl->cli_id = mbedtls_calloc( 1, ilen ) ) == NULL )
return( MBEDTLS_ERR_SSL_ALLOC_FAILED );
memcpy( ssl->cli_id, info, ilen );
ssl->cli_id_len = ilen;
return( 0 );
}
void mbedtls_ssl_conf_dtls_cookies( mbedtls_ssl_config *conf,
mbedtls_ssl_cookie_write_t *f_cookie_write,
mbedtls_ssl_cookie_check_t *f_cookie_check,
void *p_cookie )
{
conf->f_cookie_write = f_cookie_write;
conf->f_cookie_check = f_cookie_check;
conf->p_cookie = p_cookie;
}
#endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */
#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
static int ssl_parse_servername_ext( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t servername_list_size, hostname_len;
const unsigned char *p;
MBEDTLS_SSL_DEBUG_MSG( 3, ( "parse ServerName extension" ) );
if( len < 2 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
servername_list_size = ( ( buf[0] << 8 ) | ( buf[1] ) );
if( servername_list_size + 2 != len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
p = buf + 2;
while( servername_list_size > 2 )
{
hostname_len = ( ( p[1] << 8 ) | p[2] );
if( hostname_len + 3 > servername_list_size )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( p[0] == MBEDTLS_TLS_EXT_SERVERNAME_HOSTNAME )
{
ret = ssl->conf->f_sni( ssl->conf->p_sni,
ssl, p + 3, hostname_len );
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "ssl_sni_wrapper", ret );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_UNRECOGNIZED_NAME );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
return( 0 );
}
servername_list_size -= hostname_len + 3;
p += hostname_len + 3;
}
if( servername_list_size != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
return( 0 );
}
#endif /* MBEDTLS_SSL_SERVER_NAME_INDICATION */
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED)
static int ssl_conf_has_psk_or_cb( mbedtls_ssl_config const *conf )
{
if( conf->f_psk != NULL )
return( 1 );
if( conf->psk_identity_len == 0 || conf->psk_identity == NULL )
return( 0 );
if( conf->psk != NULL && conf->psk_len != 0 )
return( 1 );
#if defined(MBEDTLS_USE_PSA_CRYPTO)
if( conf->psk_opaque != 0 )
return( 1 );
#endif /* MBEDTLS_USE_PSA_CRYPTO */
return( 0 );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
static int ssl_use_opaque_psk( mbedtls_ssl_context const *ssl )
{
if( ssl->conf->f_psk != NULL )
{
/* If we've used a callback to select the PSK,
* the static configuration is irrelevant. */
if( ssl->handshake->psk_opaque != 0 )
return( 1 );
return( 0 );
}
if( ssl->conf->psk_opaque != 0 )
return( 1 );
return( 0 );
}
#endif /* MBEDTLS_USE_PSA_CRYPTO */
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */
static int ssl_parse_renegotiation_info( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
/* Check verify-data in constant-time. The length OTOH is no secret */
if( len != 1 + ssl->verify_data_len ||
buf[0] != ssl->verify_data_len ||
mbedtls_ssl_safer_memcmp( buf + 1, ssl->peer_verify_data,
ssl->verify_data_len ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "non-matching renegotiation info" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
}
else
#endif /* MBEDTLS_SSL_RENEGOTIATION */
{
if( len != 1 || buf[0] != 0x0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "non-zero length renegotiation info" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
}
return( 0 );
}
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
/*
* Status of the implementation of signature-algorithms extension:
*
* Currently, we are only considering the signature-algorithm extension
* to pick a ciphersuite which allows us to send the ServerKeyExchange
* message with a signature-hash combination that the user allows.
*
* We do *not* check whether all certificates in our certificate
* chain are signed with an allowed signature-hash pair.
* This needs to be done at a later stage.
*
*/
static int ssl_parse_signature_algorithms_ext( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
size_t sig_alg_list_size;
const unsigned char *p;
const unsigned char *end = buf + len;
mbedtls_md_type_t md_cur;
mbedtls_pk_type_t sig_cur;
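    /*
     * signature_algorithms (RFC 5246, 7.4.1.4.1):
     *   0 . 1   supported_signature_algorithms length
     *   then a list of 2-byte SignatureAndHashAlgorithm pairs:
     *   p[0] = hash algorithm, p[1] = signature algorithm.
     */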
if ( len < 2 ) {
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
sig_alg_list_size = ( ( buf[0] << 8 ) | ( buf[1] ) );
if( sig_alg_list_size + 2 != len ||
sig_alg_list_size % 2 != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/* Currently we only guarantee signing the ServerKeyExchange message according
* to the constraints specified in this extension (see above), so it suffices
* to remember only one suitable hash for each possible signature algorithm.
*
* This will change when we also consider certificate signatures,
* in which case we will need to remember the whole signature-hash
* pair list from the extension.
*/
for( p = buf + 2; p < end; p += 2 )
{
/* Silently ignore unknown signature or hash algorithms. */
if( ( sig_cur = mbedtls_ssl_pk_alg_from_sig( p[1] ) ) == MBEDTLS_PK_NONE )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext"
" unknown sig alg encoding %d", p[1] ) );
continue;
}
/* Check if we support the hash the user proposes */
md_cur = mbedtls_ssl_md_alg_from_hash( p[0] );
if( md_cur == MBEDTLS_MD_NONE )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext:"
" unknown hash alg encoding %d", p[0] ) );
continue;
}
if( mbedtls_ssl_check_sig_hash( ssl, md_cur ) == 0 )
{
mbedtls_ssl_sig_hash_set_add( &ssl->handshake->hash_algs, sig_cur, md_cur );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext:"
" match sig %d and hash %d",
sig_cur, md_cur ) );
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext: "
"hash alg %d not supported", md_cur ) );
}
}
return( 0 );
}
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \
defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
static int ssl_parse_supported_elliptic_curves( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
size_t list_size, our_size;
const unsigned char *p;
const mbedtls_ecp_curve_info *curve_info, **curves;
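    /*
     * elliptic_curves / supported_groups (RFC 4492, RFC 7919):
     *   0 . 1   curve list length
     *   then a list of 2-byte NamedCurve identifiers.
     */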
if ( len < 2 ) {
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
list_size = ( ( buf[0] << 8 ) | ( buf[1] ) );
if( list_size + 2 != len ||
list_size % 2 != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/* Should never happen unless client duplicates the extension */
if( ssl->handshake->curves != NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/* Don't allow our peer to make us allocate too much memory,
* and leave room for a final 0 */
our_size = list_size / 2 + 1;
if( our_size > MBEDTLS_ECP_DP_MAX )
our_size = MBEDTLS_ECP_DP_MAX;
if( ( curves = mbedtls_calloc( our_size, sizeof( *curves ) ) ) == NULL )
{
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_INTERNAL_ERROR );
return( MBEDTLS_ERR_SSL_ALLOC_FAILED );
}
ssl->handshake->curves = curves;
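    /* curves[] stays NULL-terminated: calloc() zero-initialised it and
     * our_size reserves one extra slot for the terminating NULL. */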
p = buf + 2;
while( list_size > 0 && our_size > 1 )
{
curve_info = mbedtls_ecp_curve_info_from_tls_id( ( p[0] << 8 ) | p[1] );
if( curve_info != NULL )
{
*curves++ = curve_info;
our_size--;
}
list_size -= 2;
p += 2;
}
return( 0 );
}
static int ssl_parse_supported_point_formats( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
size_t list_size;
const unsigned char *p;
if( len == 0 || (size_t)( buf[0] + 1 ) != len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
list_size = buf[0];
p = buf + 1;
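    /* ec_point_formats (RFC 4492): one length byte followed by a list of
     * ECPointFormat bytes; the first format we support is selected. */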
while( list_size > 0 )
{
if( p[0] == MBEDTLS_ECP_PF_UNCOMPRESSED ||
p[0] == MBEDTLS_ECP_PF_COMPRESSED )
{
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C)
ssl->handshake->ecdh_ctx.point_format = p[0];
#endif
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
ssl->handshake->ecjpake_ctx.point_format = p[0];
#endif
MBEDTLS_SSL_DEBUG_MSG( 4, ( "point format selected: %d", p[0] ) );
return( 0 );
}
list_size--;
p++;
}
return( 0 );
}
#endif /* MBEDTLS_ECDH_C || MBEDTLS_ECDSA_C ||
MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
static int ssl_parse_ecjpake_kkpp( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
if( mbedtls_ecjpake_check( &ssl->handshake->ecjpake_ctx ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "skip ecjpake kkpp extension" ) );
return( 0 );
}
if( ( ret = mbedtls_ecjpake_read_round_one( &ssl->handshake->ecjpake_ctx,
buf, len ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_read_round_one", ret );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
return( ret );
}
/* Only mark the extension as OK when we're sure it is */
ssl->handshake->cli_exts |= MBEDTLS_TLS_EXT_ECJPAKE_KKPP_OK;
return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH)
static int ssl_parse_max_fragment_length_ext( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
if( len != 1 || buf[0] >= MBEDTLS_SSL_MAX_FRAG_LEN_INVALID )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->session_negotiate->mfl_code = buf[0];
return( 0 );
}
#endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
static int ssl_parse_cid_ext( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
size_t peer_cid_len;
/* CID extension only makes sense in DTLS */
if( ssl->conf->transport != MBEDTLS_SSL_TRANSPORT_DATAGRAM )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/*
* Quoting draft-ietf-tls-dtls-connection-id-05
* https://tools.ietf.org/html/draft-ietf-tls-dtls-connection-id-05
*
* struct {
* opaque cid<0..2^8-1>;
* } ConnectionId;
*/
if( len < 1 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
peer_cid_len = *buf++;
len--;
if( len != peer_cid_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/* Ignore CID if the user has disabled its use. */
if( ssl->negotiate_cid == MBEDTLS_SSL_CID_DISABLED )
{
/* Leave ssl->handshake->cid_in_use in its default
* value of MBEDTLS_SSL_CID_DISABLED. */
MBEDTLS_SSL_DEBUG_MSG( 3, ( "Client sent CID extension, but CID disabled" ) );
return( 0 );
}
if( peer_cid_len > MBEDTLS_SSL_CID_OUT_LEN_MAX )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->handshake->cid_in_use = MBEDTLS_SSL_CID_ENABLED;
ssl->handshake->peer_cid_len = (uint8_t) peer_cid_len;
memcpy( ssl->handshake->peer_cid, buf, peer_cid_len );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "Use of CID extension negotiated" ) );
MBEDTLS_SSL_DEBUG_BUF( 3, "Client CID", buf, peer_cid_len );
return( 0 );
}
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
#if defined(MBEDTLS_SSL_TRUNCATED_HMAC)
static int ssl_parse_truncated_hmac_ext( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
if( len != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
((void) buf);
if( ssl->conf->trunc_hmac == MBEDTLS_SSL_TRUNC_HMAC_ENABLED )
ssl->session_negotiate->trunc_hmac = MBEDTLS_SSL_TRUNC_HMAC_ENABLED;
return( 0 );
}
#endif /* MBEDTLS_SSL_TRUNCATED_HMAC */
#if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC)
static int ssl_parse_encrypt_then_mac_ext( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
if( len != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
((void) buf);
if( ssl->conf->encrypt_then_mac == MBEDTLS_SSL_ETM_ENABLED &&
ssl->minor_ver != MBEDTLS_SSL_MINOR_VERSION_0 )
{
ssl->session_negotiate->encrypt_then_mac = MBEDTLS_SSL_ETM_ENABLED;
}
return( 0 );
}
#endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */
#if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET)
static int ssl_parse_extended_ms_ext( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
if( len != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
((void) buf);
if( ssl->conf->extended_ms == MBEDTLS_SSL_EXTENDED_MS_ENABLED &&
ssl->minor_ver != MBEDTLS_SSL_MINOR_VERSION_0 )
{
ssl->handshake->extended_ms = MBEDTLS_SSL_EXTENDED_MS_ENABLED;
}
return( 0 );
}
#endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
static int ssl_parse_session_ticket_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t len )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
mbedtls_ssl_session session;
mbedtls_ssl_session_init( &session );
if( ssl->conf->f_ticket_parse == NULL ||
ssl->conf->f_ticket_write == NULL )
{
return( 0 );
}
/* Remember the client asked us to send a new ticket */
ssl->handshake->new_session_ticket = 1;
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket length: %d", len ) );
if( len == 0 )
return( 0 );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket rejected: renegotiating" ) );
return( 0 );
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
/*
* Failures are ok: just ignore the ticket and proceed.
*/
if( ( ret = ssl->conf->f_ticket_parse( ssl->conf->p_ticket, &session,
buf, len ) ) != 0 )
{
mbedtls_ssl_session_free( &session );
if( ret == MBEDTLS_ERR_SSL_INVALID_MAC )
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket is not authentic" ) );
else if( ret == MBEDTLS_ERR_SSL_SESSION_TICKET_EXPIRED )
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ticket is expired" ) );
else
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_ticket_parse", ret );
return( 0 );
}
/*
* Keep the session ID sent by the client, since we MUST send it back to
* inform them we're accepting the ticket (RFC 5077 section 3.4)
*/
session.id_len = ssl->session_negotiate->id_len;
memcpy( &session.id, ssl->session_negotiate->id, session.id_len );
mbedtls_ssl_session_free( ssl->session_negotiate );
memcpy( ssl->session_negotiate, &session, sizeof( mbedtls_ssl_session ) );
/* Zeroize instead of free as we copied the content */
mbedtls_platform_zeroize( &session, sizeof( mbedtls_ssl_session ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "session successfully restored from ticket" ) );
ssl->handshake->resume = 1;
/* Don't send a new ticket after all, this one is OK */
ssl->handshake->new_session_ticket = 0;
return( 0 );
}
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
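/*
 * Illustrative sketch (not part of this file): the f_ticket_write/parse
 * callbacks checked above are usually provided by the bundled ticket
 * module (ssl_ticket.c). The variable names tctx, ctr_drbg and conf are
 * hypothetical.
 *
 *   mbedtls_ssl_ticket_context tctx;
 *   mbedtls_ssl_ticket_init( &tctx );
 *   mbedtls_ssl_ticket_setup( &tctx, mbedtls_ctr_drbg_random, &ctr_drbg,
 *                             MBEDTLS_CIPHER_AES_256_GCM, 86400 );
 *   mbedtls_ssl_conf_session_tickets_cb( &conf, mbedtls_ssl_ticket_write,
 *                                        mbedtls_ssl_ticket_parse, &tctx );
 */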
#if defined(MBEDTLS_SSL_ALPN)
static int ssl_parse_alpn_ext( mbedtls_ssl_context *ssl,
const unsigned char *buf, size_t len )
{
size_t list_len, cur_len, ours_len;
const unsigned char *theirs, *start, *end;
const char **ours;
/* If ALPN not configured, just ignore the extension */
if( ssl->conf->alpn_list == NULL )
return( 0 );
/*
* opaque ProtocolName<1..2^8-1>;
*
* struct {
* ProtocolName protocol_name_list<2..2^16-1>
* } ProtocolNameList;
*/
/* Min length is 2 (list_len) + 1 (name_len) + 1 (name) */
if( len < 4 )
{
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
list_len = ( buf[0] << 8 ) | buf[1];
if( list_len != len - 2 )
{
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/*
* Validate peer's list (lengths)
*/
start = buf + 2;
end = buf + len;
for( theirs = start; theirs != end; theirs += cur_len )
{
cur_len = *theirs++;
/* Current identifier must fit in list */
if( cur_len > (size_t)( end - theirs ) )
{
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/* Empty strings MUST NOT be included */
if( cur_len == 0 )
{
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
}
/*
* Use our order of preference
*/
for( ours = ssl->conf->alpn_list; *ours != NULL; ours++ )
{
ours_len = strlen( *ours );
for( theirs = start; theirs != end; theirs += cur_len )
{
cur_len = *theirs++;
if( cur_len == ours_len &&
memcmp( theirs, *ours, cur_len ) == 0 )
{
ssl->alpn_chosen = *ours;
return( 0 );
}
}
}
/* If we get there, no match was found */
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_NO_APPLICATION_PROTOCOL );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_ALPN */
/*
* Auxiliary functions for ServerHello parsing and related actions
*/
#if defined(MBEDTLS_X509_CRT_PARSE_C)
/*
* Return 0 if the given key uses one of the acceptable curves, -1 otherwise
*/
#if defined(MBEDTLS_ECDSA_C)
static int ssl_check_key_curve( mbedtls_pk_context *pk,
const mbedtls_ecp_curve_info **curves )
{
const mbedtls_ecp_curve_info **crv = curves;
mbedtls_ecp_group_id grp_id = mbedtls_pk_ec( *pk )->grp.id;
while( *crv != NULL )
{
if( (*crv)->grp_id == grp_id )
return( 0 );
crv++;
}
return( -1 );
}
#endif /* MBEDTLS_ECDSA_C */
/*
* Try picking a certificate for this ciphersuite,
* return 0 on success and -1 on failure.
*/
static int ssl_pick_cert( mbedtls_ssl_context *ssl,
const mbedtls_ssl_ciphersuite_t * ciphersuite_info )
{
mbedtls_ssl_key_cert *cur, *list, *fallback = NULL;
mbedtls_pk_type_t pk_alg =
mbedtls_ssl_get_ciphersuite_sig_pk_alg( ciphersuite_info );
uint32_t flags;
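    /* Certificates installed by the SNI callback (if any) take precedence
     * over the statically configured key/cert list. */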
#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
if( ssl->handshake->sni_key_cert != NULL )
list = ssl->handshake->sni_key_cert;
else
#endif
list = ssl->conf->key_cert;
if( pk_alg == MBEDTLS_PK_NONE )
return( 0 );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite requires certificate" ) );
if( list == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server has no certificate" ) );
return( -1 );
}
for( cur = list; cur != NULL; cur = cur->next )
{
flags = 0;
MBEDTLS_SSL_DEBUG_CRT( 3, "candidate certificate chain, certificate",
cur->cert );
if( ! mbedtls_pk_can_do( &cur->cert->pk, pk_alg ) )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "certificate mismatch: key type" ) );
continue;
}
/*
* This avoids sending the client a cert it'll reject based on
* keyUsage or other extensions.
*
* It also allows the user to provision different certificates for
* different uses based on keyUsage, eg if they want to avoid signing
* and decrypting with the same RSA key.
*/
if( mbedtls_ssl_check_cert_usage( cur->cert, ciphersuite_info,
MBEDTLS_SSL_IS_SERVER, &flags ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "certificate mismatch: "
"(extended) key usage extension" ) );
continue;
}
#if defined(MBEDTLS_ECDSA_C)
if( pk_alg == MBEDTLS_PK_ECDSA &&
ssl_check_key_curve( &cur->cert->pk, ssl->handshake->curves ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "certificate mismatch: elliptic curve" ) );
continue;
}
#endif
/*
         * Try to select a SHA-1 signed certificate for pre-TLS 1.2 clients, but
         * still present a SHA-2 signed one rather than failing if that is the
         * only certificate satisfying the other conditions.
*/
        if( ssl->minor_ver < MBEDTLS_SSL_MINOR_VERSION_3 &&
            cur->cert->sig_md != MBEDTLS_MD_SHA1 )
        {
            if( fallback == NULL )
                fallback = cur;

            MBEDTLS_SSL_DEBUG_MSG( 3, ( "certificate not preferred: "
                                        "sha-2 with pre-TLS 1.2 client" ) );
            continue;
        }
/* If we get there, we got a winner */
break;
}
if( cur == NULL )
cur = fallback;
/* Do not update ssl->handshake->key_cert unless there is a match */
if( cur != NULL )
{
ssl->handshake->key_cert = cur;
MBEDTLS_SSL_DEBUG_CRT( 3, "selected certificate chain, certificate",
ssl->handshake->key_cert->cert );
return( 0 );
}
return( -1 );
}
#endif /* MBEDTLS_X509_CRT_PARSE_C */
/*
* Check if a given ciphersuite is suitable for use with our config/keys/etc
* Sets ciphersuite_info only if the suite matches.
*/
static int ssl_ciphersuite_match( mbedtls_ssl_context *ssl, int suite_id,
const mbedtls_ssl_ciphersuite_t **ciphersuite_info )
{
const mbedtls_ssl_ciphersuite_t *suite_info;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
mbedtls_pk_type_t sig_type;
#endif
suite_info = mbedtls_ssl_ciphersuite_from_id( suite_id );
if( suite_info == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "trying ciphersuite: %s", suite_info->name ) );
if( suite_info->min_minor_ver > ssl->minor_ver ||
suite_info->max_minor_ver < ssl->minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: version" ) );
return( 0 );
}
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
( suite_info->flags & MBEDTLS_CIPHERSUITE_NODTLS ) )
return( 0 );
#endif
#if defined(MBEDTLS_ARC4_C)
if( ssl->conf->arc4_disabled == MBEDTLS_SSL_ARC4_DISABLED &&
suite_info->cipher == MBEDTLS_CIPHER_ARC4_128 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: rc4" ) );
return( 0 );
}
#endif
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
if( suite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECJPAKE &&
( ssl->handshake->cli_exts & MBEDTLS_TLS_EXT_ECJPAKE_KKPP_OK ) == 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: ecjpake "
"not configured or ext missing" ) );
return( 0 );
}
#endif
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C)
if( mbedtls_ssl_ciphersuite_uses_ec( suite_info ) &&
( ssl->handshake->curves == NULL ||
ssl->handshake->curves[0] == NULL ) )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: "
"no common elliptic curve" ) );
return( 0 );
}
#endif
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED)
/* If the ciphersuite requires a pre-shared key and we don't
* have one, skip it now rather than failing later */
if( mbedtls_ssl_ciphersuite_uses_psk( suite_info ) &&
ssl_conf_has_psk_or_cb( ssl->conf ) == 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: no pre-shared key" ) );
return( 0 );
}
#endif
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
/* If the ciphersuite requires signing, check whether
* a suitable hash algorithm is present. */
if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
{
sig_type = mbedtls_ssl_get_ciphersuite_sig_alg( suite_info );
if( sig_type != MBEDTLS_PK_NONE &&
mbedtls_ssl_sig_hash_set_find( &ssl->handshake->hash_algs, sig_type ) == MBEDTLS_MD_NONE )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: no suitable hash algorithm "
"for signature algorithm %d", sig_type ) );
return( 0 );
}
}
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
#if defined(MBEDTLS_X509_CRT_PARSE_C)
/*
* Final check: if ciphersuite requires us to have a
* certificate/key of a particular type:
* - select the appropriate certificate if we have one, or
* - try the next ciphersuite if we don't
* This must be done last since we modify the key_cert list.
*/
if( ssl_pick_cert( ssl, suite_info ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciphersuite mismatch: "
"no suitable certificate" ) );
return( 0 );
}
#endif
*ciphersuite_info = suite_info;
return( 0 );
}
#if defined(MBEDTLS_SSL_SRV_SUPPORT_SSLV2_CLIENT_HELLO)
static int ssl_parse_client_hello_v2( mbedtls_ssl_context *ssl )
{
int ret, got_common_suite;
unsigned int i, j;
size_t n;
unsigned int ciph_len, sess_len, chal_len;
unsigned char *buf, *p;
const int *ciphersuites;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse client hello v2" ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "client hello v2 illegal for renegotiation" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
buf = ssl->in_hdr;
MBEDTLS_SSL_DEBUG_BUF( 4, "record header", buf, 5 );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, message type: %d",
buf[2] ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, message len.: %d",
( ( buf[0] & 0x7F ) << 8 ) | buf[1] ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, max. version: [%d:%d]",
buf[3], buf[4] ) );
/*
* SSLv2 Client Hello
*
* Record layer:
* 0 . 1 message length
*
* SSL layer:
* 2 . 2 message type
* 3 . 4 protocol version
*/
if( buf[2] != MBEDTLS_SSL_HS_CLIENT_HELLO ||
buf[3] != MBEDTLS_SSL_MAJOR_VERSION_3 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
n = ( ( buf[0] << 8 ) | buf[1] ) & 0x7FFF;
if( n < 17 || n > 512 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->major_ver = MBEDTLS_SSL_MAJOR_VERSION_3;
ssl->minor_ver = ( buf[4] <= ssl->conf->max_minor_ver )
? buf[4] : ssl->conf->max_minor_ver;
if( ssl->minor_ver < ssl->conf->min_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "client only supports ssl smaller than minimum"
" [%d:%d] < [%d:%d]",
ssl->major_ver, ssl->minor_ver,
ssl->conf->min_major_ver, ssl->conf->min_minor_ver ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_PROTOCOL_VERSION );
return( MBEDTLS_ERR_SSL_BAD_HS_PROTOCOL_VERSION );
}
ssl->handshake->max_major_ver = buf[3];
ssl->handshake->max_minor_ver = buf[4];
if( ( ret = mbedtls_ssl_fetch_input( ssl, 2 + n ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_fetch_input", ret );
return( ret );
}
ssl->handshake->update_checksum( ssl, buf + 2, n );
buf = ssl->in_msg;
n = ssl->in_left - 5;
/*
* 0 . 1 ciphersuitelist length
* 2 . 3 session id length
* 4 . 5 challenge length
* 6 . .. ciphersuitelist
* .. . .. session id
* .. . .. challenge
*/
MBEDTLS_SSL_DEBUG_BUF( 4, "record contents", buf, n );
ciph_len = ( buf[0] << 8 ) | buf[1];
sess_len = ( buf[2] << 8 ) | buf[3];
chal_len = ( buf[4] << 8 ) | buf[5];
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciph_len: %d, sess_len: %d, chal_len: %d",
ciph_len, sess_len, chal_len ) );
/*
* Make sure each parameter length is valid
*/
if( ciph_len < 3 || ( ciph_len % 3 ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( sess_len > 32 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( chal_len < 8 || chal_len > 32 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( n != 6 + ciph_len + sess_len + chal_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, ciphersuitelist",
buf + 6, ciph_len );
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, session id",
buf + 6 + ciph_len, sess_len );
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, challenge",
buf + 6 + ciph_len + sess_len, chal_len );
p = buf + 6 + ciph_len;
ssl->session_negotiate->id_len = sess_len;
memset( ssl->session_negotiate->id, 0,
sizeof( ssl->session_negotiate->id ) );
memcpy( ssl->session_negotiate->id, p, ssl->session_negotiate->id_len );
p += sess_len;
memset( ssl->handshake->randbytes, 0, 64 );
memcpy( ssl->handshake->randbytes + 32 - chal_len, p, chal_len );
/*
* Check for TLS_EMPTY_RENEGOTIATION_INFO_SCSV
*/
for( i = 0, p = buf + 6; i < ciph_len; i += 3, p += 3 )
{
if( p[0] == 0 && p[1] == 0 && p[2] == MBEDTLS_SSL_EMPTY_RENEGOTIATION_INFO )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "received TLS_EMPTY_RENEGOTIATION_INFO " ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "received RENEGOTIATION SCSV "
"during renegotiation" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
break;
}
}
#if defined(MBEDTLS_SSL_FALLBACK_SCSV)
for( i = 0, p = buf + 6; i < ciph_len; i += 3, p += 3 )
{
if( p[0] == 0 &&
p[1] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE >> 8 ) & 0xff ) &&
p[2] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE ) & 0xff ) )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "received FALLBACK_SCSV" ) );
if( ssl->minor_ver < ssl->conf->max_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "inapropriate fallback" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_INAPROPRIATE_FALLBACK );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
break;
}
}
#endif /* MBEDTLS_SSL_FALLBACK_SCSV */
got_common_suite = 0;
ciphersuites = ssl->conf->ciphersuite_list[ssl->minor_ver];
ciphersuite_info = NULL;
#if defined(MBEDTLS_SSL_SRV_RESPECT_CLIENT_PREFERENCE)
for( j = 0, p = buf + 6; j < ciph_len; j += 3, p += 3 )
for( i = 0; ciphersuites[i] != 0; i++ )
#else
for( i = 0; ciphersuites[i] != 0; i++ )
for( j = 0, p = buf + 6; j < ciph_len; j += 3, p += 3 )
#endif
{
if( p[0] != 0 ||
p[1] != ( ( ciphersuites[i] >> 8 ) & 0xFF ) ||
p[2] != ( ( ciphersuites[i] ) & 0xFF ) )
continue;
got_common_suite = 1;
if( ( ret = ssl_ciphersuite_match( ssl, ciphersuites[i],
&ciphersuite_info ) ) != 0 )
return( ret );
if( ciphersuite_info != NULL )
goto have_ciphersuite_v2;
}
if( got_common_suite )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got ciphersuites in common, "
"but none of them usable" ) );
return( MBEDTLS_ERR_SSL_NO_USABLE_CIPHERSUITE );
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no ciphersuites in common" ) );
return( MBEDTLS_ERR_SSL_NO_CIPHER_CHOSEN );
}
have_ciphersuite_v2:
MBEDTLS_SSL_DEBUG_MSG( 2, ( "selected ciphersuite: %s", ciphersuite_info->name ) );
ssl->session_negotiate->ciphersuite = ciphersuites[i];
ssl->handshake->ciphersuite_info = ciphersuite_info;
/*
* SSLv2 Client Hello relevant renegotiation security checks
*/
if( ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
ssl->conf->allow_legacy_renegotiation == MBEDTLS_SSL_LEGACY_BREAK_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "legacy renegotiation, breaking off handshake" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->in_left = 0;
ssl->state++;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse client hello v2" ) );
return( 0 );
}
#endif /* MBEDTLS_SSL_SRV_SUPPORT_SSLV2_CLIENT_HELLO */
/* This function doesn't alert on errors that happen early during
ClientHello parsing because they might indicate that the client is
not talking SSL/TLS at all and would not understand our alert. */
static int ssl_parse_client_hello( mbedtls_ssl_context *ssl )
{
int ret, got_common_suite;
size_t i, j;
size_t ciph_offset, comp_offset, ext_offset;
size_t msg_len, ciph_len, sess_len, comp_len, ext_len;
#if defined(MBEDTLS_SSL_PROTO_DTLS)
size_t cookie_offset, cookie_len;
#endif
unsigned char *buf, *p, *ext;
#if defined(MBEDTLS_SSL_RENEGOTIATION)
int renegotiation_info_seen = 0;
#endif
int handshake_failure = 0;
const int *ciphersuites;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info;
int major, minor;
/* If there is no signature-algorithm extension present,
* we need to fall back to the default values for allowed
* signature-hash pairs. */
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
int sig_hash_alg_ext_present = 0;
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse client hello" ) );
#if defined(MBEDTLS_SSL_DTLS_ANTI_REPLAY)
read_record_header:
#endif
/*
* If renegotiating, then the input was read with mbedtls_ssl_read_record(),
* otherwise read it ourselves manually in order to support SSLv2
* ClientHello, which doesn't use the same record layer format.
*/
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_INITIAL_HANDSHAKE )
#endif
{
if( ( ret = mbedtls_ssl_fetch_input( ssl, 5 ) ) != 0 )
{
/* No alert on a read error. */
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_fetch_input", ret );
return( ret );
}
}
buf = ssl->in_hdr;
#if defined(MBEDTLS_SSL_SRV_SUPPORT_SSLV2_CLIENT_HELLO)
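    /* An SSLv2 ClientHello record starts with a 2-byte length whose top bit
     * is set; that pattern cannot occur in an SSLv3/TLS record header, so it
     * is used to dispatch to the SSLv2 parser. */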
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_STREAM )
#endif
if( ( buf[0] & 0x80 ) != 0 )
return( ssl_parse_client_hello_v2( ssl ) );
#endif
MBEDTLS_SSL_DEBUG_BUF( 4, "record header", buf, mbedtls_ssl_in_hdr_len( ssl ) );
/*
* SSLv3/TLS Client Hello
*
* Record layer:
* 0 . 0 message type
* 1 . 2 protocol version
* 3 . 11 DTLS: epoch + record sequence number
* 3 . 4 message length
*/
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, message type: %d",
buf[0] ) );
if( buf[0] != MBEDTLS_SSL_MSG_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, message len.: %d",
( ssl->in_len[0] << 8 ) | ssl->in_len[1] ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, protocol version: [%d:%d]",
buf[1], buf[2] ) );
mbedtls_ssl_read_version( &major, &minor, ssl->conf->transport, buf + 1 );
/* According to RFC 5246 Appendix E.1, the version here is typically
* "{03,00}, the lowest version number supported by the client, [or] the
* value of ClientHello.client_version", so the only meaningful check here
* is the major version shouldn't be less than 3 */
if( major < MBEDTLS_SSL_MAJOR_VERSION_3 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/* For DTLS if this is the initial handshake, remember the client sequence
* number to use it in our next message (RFC 6347 4.2.1) */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM
#if defined(MBEDTLS_SSL_RENEGOTIATION)
&& ssl->renego_status == MBEDTLS_SSL_INITIAL_HANDSHAKE
#endif
)
{
/* Epoch should be 0 for initial handshakes */
if( ssl->in_ctr[0] != 0 || ssl->in_ctr[1] != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
memcpy( ssl->cur_out_ctr + 2, ssl->in_ctr + 2, 6 );
#if defined(MBEDTLS_SSL_DTLS_ANTI_REPLAY)
if( mbedtls_ssl_dtls_replay_check( ssl ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "replayed record, discarding" ) );
ssl->next_record_offset = 0;
ssl->in_left = 0;
goto read_record_header;
}
/* No MAC to check yet, so we can update right now */
mbedtls_ssl_dtls_replay_update( ssl );
#endif
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
msg_len = ( ssl->in_len[0] << 8 ) | ssl->in_len[1];
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
/* Set by mbedtls_ssl_read_record() */
msg_len = ssl->in_hslen;
}
else
#endif
{
if( msg_len > MBEDTLS_SSL_IN_CONTENT_LEN )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( ( ret = mbedtls_ssl_fetch_input( ssl,
mbedtls_ssl_in_hdr_len( ssl ) + msg_len ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_fetch_input", ret );
return( ret );
}
/* Done reading this record, get ready for the next one */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
ssl->next_record_offset = msg_len + mbedtls_ssl_in_hdr_len( ssl );
else
#endif
ssl->in_left = 0;
}
buf = ssl->in_msg;
MBEDTLS_SSL_DEBUG_BUF( 4, "record contents", buf, msg_len );
ssl->handshake->update_checksum( ssl, buf, msg_len );
/*
* Handshake layer:
* 0 . 0 handshake type
* 1 . 3 handshake length
     * 4 . 5 DTLS only: message sequence number
* 6 . 8 DTLS only: fragment offset
* 9 . 11 DTLS only: fragment length
*/
if( msg_len < mbedtls_ssl_hs_hdr_len( ssl ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, handshake type: %d", buf[0] ) );
if( buf[0] != MBEDTLS_SSL_HS_CLIENT_HELLO )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, handshake len.: %d",
( buf[1] << 16 ) | ( buf[2] << 8 ) | buf[3] ) );
/* We don't support fragmentation of ClientHello (yet?) */
if( buf[1] != 0 ||
msg_len != mbedtls_ssl_hs_hdr_len( ssl ) + ( ( buf[2] << 8 ) | buf[3] ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
{
/*
* Copy the client's handshake message_seq on initial handshakes,
* check sequence number on renego.
*/
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS )
{
/* This couldn't be done in ssl_prepare_handshake_record() */
unsigned int cli_msg_seq = ( ssl->in_msg[4] << 8 ) |
ssl->in_msg[5];
if( cli_msg_seq != ssl->handshake->in_msg_seq )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message_seq: "
"%d (expected %d)", cli_msg_seq,
ssl->handshake->in_msg_seq ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->handshake->in_msg_seq++;
}
else
#endif
{
unsigned int cli_msg_seq = ( ssl->in_msg[4] << 8 ) |
ssl->in_msg[5];
ssl->handshake->out_msg_seq = cli_msg_seq;
ssl->handshake->in_msg_seq = cli_msg_seq + 1;
}
/*
* For now we don't support fragmentation, so make sure
* fragment_offset == 0 and fragment_length == length
*/
if( ssl->in_msg[6] != 0 || ssl->in_msg[7] != 0 || ssl->in_msg[8] != 0 ||
memcmp( ssl->in_msg + 1, ssl->in_msg + 9, 3 ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "ClientHello fragmentation not supported" ) );
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
}
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
buf += mbedtls_ssl_hs_hdr_len( ssl );
msg_len -= mbedtls_ssl_hs_hdr_len( ssl );
/*
* ClientHello layer:
* 0 . 1 protocol version
* 2 . 33 random bytes (starting with 4 bytes of Unix time)
     * 34 . 34 session id length (1 byte)
* 35 . 34+x session id
* 35+x . 35+x DTLS only: cookie length (1 byte)
* 36+x . .. DTLS only: cookie
* .. . .. ciphersuite list length (2 bytes)
* .. . .. ciphersuite list
* .. . .. compression alg. list length (1 byte)
* .. . .. compression alg. list
* .. . .. extensions length (2 bytes, optional)
* .. . .. extensions (optional)
*/
/*
* Minimal length (with everything empty and extensions omitted) is
* 2 + 32 + 1 + 2 + 1 = 38 bytes. Check that first, so that we can
* read at least up to session id length without worrying.
*/
if( msg_len < 38 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/*
* Check and save the protocol version
*/
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, version", buf, 2 );
mbedtls_ssl_read_version( &ssl->major_ver, &ssl->minor_ver,
ssl->conf->transport, buf );
ssl->handshake->max_major_ver = ssl->major_ver;
ssl->handshake->max_minor_ver = ssl->minor_ver;
if( ssl->major_ver < ssl->conf->min_major_ver ||
ssl->minor_ver < ssl->conf->min_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "client only supports ssl smaller than minimum"
" [%d:%d] < [%d:%d]",
ssl->major_ver, ssl->minor_ver,
ssl->conf->min_major_ver, ssl->conf->min_minor_ver ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_PROTOCOL_VERSION );
return( MBEDTLS_ERR_SSL_BAD_HS_PROTOCOL_VERSION );
}
if( ssl->major_ver > ssl->conf->max_major_ver )
{
ssl->major_ver = ssl->conf->max_major_ver;
ssl->minor_ver = ssl->conf->max_minor_ver;
}
else if( ssl->minor_ver > ssl->conf->max_minor_ver )
ssl->minor_ver = ssl->conf->max_minor_ver;
/*
* Save client random (inc. Unix time)
*/
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, random bytes", buf + 2, 32 );
memcpy( ssl->handshake->randbytes, buf + 2, 32 );
/*
* Check the session ID length and save session ID
*/
sess_len = buf[34];
if( sess_len > sizeof( ssl->session_negotiate->id ) ||
sess_len + 34 + 2 > msg_len ) /* 2 for cipherlist length field */
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, session id", buf + 35, sess_len );
ssl->session_negotiate->id_len = sess_len;
memset( ssl->session_negotiate->id, 0,
sizeof( ssl->session_negotiate->id ) );
memcpy( ssl->session_negotiate->id, buf + 35,
ssl->session_negotiate->id_len );
/*
* Check the cookie length and content
*/
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
{
cookie_offset = 35 + sess_len;
cookie_len = buf[cookie_offset];
if( cookie_offset + 1 + cookie_len + 2 > msg_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_PROTOCOL_VERSION );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, cookie",
buf + cookie_offset + 1, cookie_len );
#if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY)
if( ssl->conf->f_cookie_check != NULL
#if defined(MBEDTLS_SSL_RENEGOTIATION)
&& ssl->renego_status == MBEDTLS_SSL_INITIAL_HANDSHAKE
#endif
)
{
if( ssl->conf->f_cookie_check( ssl->conf->p_cookie,
buf + cookie_offset + 1, cookie_len,
ssl->cli_id, ssl->cli_id_len ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "cookie verification failed" ) );
ssl->handshake->verify_cookie_len = 1;
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "cookie verification passed" ) );
ssl->handshake->verify_cookie_len = 0;
}
}
else
#endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */
{
/* We know we didn't send a cookie, so it should be empty */
if( cookie_len != 0 )
{
/* This may be an attacker's probe, so don't send an alert */
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_MSG( 2, ( "cookie verification skipped" ) );
}
/*
* Check the ciphersuitelist length (will be parsed later)
*/
ciph_offset = cookie_offset + 1 + cookie_len;
}
else
#endif /* MBEDTLS_SSL_PROTO_DTLS */
ciph_offset = 35 + sess_len;
ciph_len = ( buf[ciph_offset + 0] << 8 )
| ( buf[ciph_offset + 1] );
if( ciph_len < 2 ||
ciph_len + 2 + ciph_offset + 1 > msg_len || /* 1 for comp. alg. len */
( ciph_len % 2 ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, ciphersuitelist",
buf + ciph_offset + 2, ciph_len );
/*
* Check the compression algorithms length and pick one
*/
comp_offset = ciph_offset + 2 + ciph_len;
comp_len = buf[comp_offset];
if( comp_len < 1 ||
comp_len > 16 ||
comp_len + comp_offset + 1 > msg_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, compression",
buf + comp_offset + 1, comp_len );
ssl->session_negotiate->compression = MBEDTLS_SSL_COMPRESS_NULL;
#if defined(MBEDTLS_ZLIB_SUPPORT)
for( i = 0; i < comp_len; ++i )
{
if( buf[comp_offset + 1 + i] == MBEDTLS_SSL_COMPRESS_DEFLATE )
{
ssl->session_negotiate->compression = MBEDTLS_SSL_COMPRESS_DEFLATE;
break;
}
}
#endif
/* See comments in ssl_write_client_hello() */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
ssl->session_negotiate->compression = MBEDTLS_SSL_COMPRESS_NULL;
#endif
/* Do not parse the extensions if the protocol is SSLv3 */
#if defined(MBEDTLS_SSL_PROTO_SSL3)
if( ( ssl->major_ver != 3 ) || ( ssl->minor_ver != 0 ) )
{
#endif
/*
* Check the extension length
*/
ext_offset = comp_offset + 1 + comp_len;
if( msg_len > ext_offset )
{
if( msg_len < ext_offset + 2 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ext_len = ( buf[ext_offset + 0] << 8 )
| ( buf[ext_offset + 1] );
if( ( ext_len > 0 && ext_len < 4 ) ||
msg_len != ext_offset + 2 + ext_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
}
else
ext_len = 0;
ext = buf + ext_offset + 2;
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello extensions", ext, ext_len );
while( ext_len != 0 )
{
unsigned int ext_id;
unsigned int ext_size;
if ( ext_len < 4 ) {
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ext_id = ( ( ext[0] << 8 ) | ( ext[1] ) );
ext_size = ( ( ext[2] << 8 ) | ( ext[3] ) );
if( ext_size + 4 > ext_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
switch( ext_id )
{
#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
case MBEDTLS_TLS_EXT_SERVERNAME:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found ServerName extension" ) );
if( ssl->conf->f_sni == NULL )
break;
ret = ssl_parse_servername_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_SERVER_NAME_INDICATION */
case MBEDTLS_TLS_EXT_RENEGOTIATION_INFO:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found renegotiation extension" ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
renegotiation_info_seen = 1;
#endif
ret = ssl_parse_renegotiation_info( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
case MBEDTLS_TLS_EXT_SIG_ALG:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found signature_algorithms extension" ) );
ret = ssl_parse_signature_algorithms_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
sig_hash_alg_ext_present = 1;
break;
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \
defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
case MBEDTLS_TLS_EXT_SUPPORTED_ELLIPTIC_CURVES:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found supported elliptic curves extension" ) );
ret = ssl_parse_supported_elliptic_curves( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
case MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found supported point formats extension" ) );
ssl->handshake->cli_exts |= MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS_PRESENT;
ret = ssl_parse_supported_point_formats( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_ECDH_C || MBEDTLS_ECDSA_C ||
MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
case MBEDTLS_TLS_EXT_ECJPAKE_KKPP:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found ecjpake kkpp extension" ) );
ret = ssl_parse_ecjpake_kkpp( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH)
case MBEDTLS_TLS_EXT_MAX_FRAGMENT_LENGTH:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found max fragment length extension" ) );
ret = ssl_parse_max_fragment_length_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */
#if defined(MBEDTLS_SSL_TRUNCATED_HMAC)
case MBEDTLS_TLS_EXT_TRUNCATED_HMAC:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found truncated hmac extension" ) );
ret = ssl_parse_truncated_hmac_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_TRUNCATED_HMAC */
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
case MBEDTLS_TLS_EXT_CID:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found CID extension" ) );
ret = ssl_parse_cid_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
#if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC)
case MBEDTLS_TLS_EXT_ENCRYPT_THEN_MAC:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found encrypt then mac extension" ) );
ret = ssl_parse_encrypt_then_mac_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */
#if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET)
case MBEDTLS_TLS_EXT_EXTENDED_MASTER_SECRET:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found extended master secret extension" ) );
ret = ssl_parse_extended_ms_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
case MBEDTLS_TLS_EXT_SESSION_TICKET:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found session ticket extension" ) );
ret = ssl_parse_session_ticket_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
#if defined(MBEDTLS_SSL_ALPN)
case MBEDTLS_TLS_EXT_ALPN:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "found alpn extension" ) );
ret = ssl_parse_alpn_ext( ssl, ext + 4, ext_size );
if( ret != 0 )
return( ret );
break;
#endif /* MBEDTLS_SSL_ALPN */
default:
MBEDTLS_SSL_DEBUG_MSG( 3, ( "unknown extension found: %d (ignoring)",
ext_id ) );
}
ext_len -= 4 + ext_size;
ext += 4 + ext_size;
if( ext_len > 0 && ext_len < 4 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
}
#if defined(MBEDTLS_SSL_PROTO_SSL3)
}
#endif
#if defined(MBEDTLS_SSL_FALLBACK_SCSV)
for( i = 0, p = buf + ciph_offset + 2; i < ciph_len; i += 2, p += 2 )
{
if( p[0] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE >> 8 ) & 0xff ) &&
p[1] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE ) & 0xff ) )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "received FALLBACK_SCSV" ) );
if( ssl->minor_ver < ssl->conf->max_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "inapropriate fallback" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_INAPROPRIATE_FALLBACK );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
break;
}
}
#endif /* MBEDTLS_SSL_FALLBACK_SCSV */
#if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
/*
* Try to fall back to default hash SHA1 if the client
* hasn't provided any preferred signature-hash combinations.
*/
if( sig_hash_alg_ext_present == 0 )
{
mbedtls_md_type_t md_default = MBEDTLS_MD_SHA1;
if( mbedtls_ssl_check_sig_hash( ssl, md_default ) != 0 )
md_default = MBEDTLS_MD_NONE;
mbedtls_ssl_sig_hash_set_const_hash( &ssl->handshake->hash_algs, md_default );
}
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 &&
MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
/*
* Check for TLS_EMPTY_RENEGOTIATION_INFO_SCSV
*/
for( i = 0, p = buf + ciph_offset + 2; i < ciph_len; i += 2, p += 2 )
{
if( p[0] == 0 && p[1] == MBEDTLS_SSL_EMPTY_RENEGOTIATION_INFO )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "received TLS_EMPTY_RENEGOTIATION_INFO " ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "received RENEGOTIATION SCSV "
"during renegotiation" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif
ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
break;
}
}
/*
* Renegotiation security checks
*/
if( ssl->secure_renegotiation != MBEDTLS_SSL_SECURE_RENEGOTIATION &&
ssl->conf->allow_legacy_renegotiation == MBEDTLS_SSL_LEGACY_BREAK_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "legacy renegotiation, breaking off handshake" ) );
handshake_failure = 1;
}
#if defined(MBEDTLS_SSL_RENEGOTIATION)
else if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS &&
ssl->secure_renegotiation == MBEDTLS_SSL_SECURE_RENEGOTIATION &&
renegotiation_info_seen == 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "renegotiation_info extension missing (secure)" ) );
handshake_failure = 1;
}
else if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS &&
ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
ssl->conf->allow_legacy_renegotiation == MBEDTLS_SSL_LEGACY_NO_RENEGOTIATION )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "legacy renegotiation not allowed" ) );
handshake_failure = 1;
}
else if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS &&
ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
renegotiation_info_seen == 1 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "renegotiation_info extension present (legacy)" ) );
handshake_failure = 1;
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
if( handshake_failure == 1 )
{
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
/*
* Search for a matching ciphersuite
* (At the end because we need information from the EC-based extensions
* and certificate from the SNI callback triggered by the SNI extension.)
*/
got_common_suite = 0;
ciphersuites = ssl->conf->ciphersuite_list[ssl->minor_ver];
ciphersuite_info = NULL;
#if defined(MBEDTLS_SSL_SRV_RESPECT_CLIENT_PREFERENCE)
for( j = 0, p = buf + ciph_offset + 2; j < ciph_len; j += 2, p += 2 )
for( i = 0; ciphersuites[i] != 0; i++ )
#else
for( i = 0; ciphersuites[i] != 0; i++ )
for( j = 0, p = buf + ciph_offset + 2; j < ciph_len; j += 2, p += 2 )
#endif
{
if( p[0] != ( ( ciphersuites[i] >> 8 ) & 0xFF ) ||
p[1] != ( ( ciphersuites[i] ) & 0xFF ) )
continue;
got_common_suite = 1;
if( ( ret = ssl_ciphersuite_match( ssl, ciphersuites[i],
&ciphersuite_info ) ) != 0 )
return( ret );
if( ciphersuite_info != NULL )
goto have_ciphersuite;
}
if( got_common_suite )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got ciphersuites in common, "
"but none of them usable" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_NO_USABLE_CIPHERSUITE );
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no ciphersuites in common" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_NO_CIPHER_CHOSEN );
}
have_ciphersuite:
MBEDTLS_SSL_DEBUG_MSG( 2, ( "selected ciphersuite: %s", ciphersuite_info->name ) );
ssl->session_negotiate->ciphersuite = ciphersuites[i];
ssl->handshake->ciphersuite_info = ciphersuite_info;
ssl->state++;
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
mbedtls_ssl_recv_flight_completed( ssl );
#endif
/* Debugging-only output for testsuite */
#if defined(MBEDTLS_DEBUG_C) && \
defined(MBEDTLS_SSL_PROTO_TLS1_2) && \
defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED)
if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
{
mbedtls_pk_type_t sig_alg = mbedtls_ssl_get_ciphersuite_sig_alg( ciphersuite_info );
if( sig_alg != MBEDTLS_PK_NONE )
{
mbedtls_md_type_t md_alg = mbedtls_ssl_sig_hash_set_find( &ssl->handshake->hash_algs,
sig_alg );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext: %d",
mbedtls_ssl_hash_from_md_alg( md_alg ) ) );
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "no hash algorithm for signature algorithm "
"%d - should not happen", sig_alg ) );
}
}
#endif
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse client hello" ) );
return( 0 );
}
#if defined(MBEDTLS_SSL_TRUNCATED_HMAC)
static void ssl_write_truncated_hmac_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
if( ssl->session_negotiate->trunc_hmac == MBEDTLS_SSL_TRUNC_HMAC_DISABLED )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding truncated hmac extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_TRUNCATED_HMAC >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_TRUNCATED_HMAC ) & 0xFF );
*p++ = 0x00;
*p++ = 0x00;
*olen = 4;
}
#endif /* MBEDTLS_SSL_TRUNCATED_HMAC */
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
static void ssl_write_cid_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
size_t ext_len;
const unsigned char *end = ssl->out_msg + MBEDTLS_SSL_OUT_CONTENT_LEN;
*olen = 0;
/* Skip writing the extension if we don't want to use it or if
* the client hasn't offered it. */
if( ssl->handshake->cid_in_use == MBEDTLS_SSL_CID_DISABLED )
return;
/* ssl->own_cid_len is at most MBEDTLS_SSL_CID_IN_LEN_MAX
* which is at most 255, so the increment cannot overflow. */
if( end < p || (size_t)( end - p ) < (unsigned)( ssl->own_cid_len + 5 ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "buffer too small" ) );
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding CID extension" ) );
/*
* Quoting draft-ietf-tls-dtls-connection-id-05
* https://tools.ietf.org/html/draft-ietf-tls-dtls-connection-id-05
*
* struct {
* opaque cid<0..2^8-1>;
* } ConnectionId;
*/
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_CID >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_CID ) & 0xFF );
ext_len = (size_t) ssl->own_cid_len + 1;
*p++ = (unsigned char)( ( ext_len >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( ext_len ) & 0xFF );
*p++ = (uint8_t) ssl->own_cid_len;
memcpy( p, ssl->own_cid, ssl->own_cid_len );
*olen = ssl->own_cid_len + 5;
}
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
#if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC)
static void ssl_write_encrypt_then_mac_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
const mbedtls_ssl_ciphersuite_t *suite = NULL;
const mbedtls_cipher_info_t *cipher = NULL;
if( ssl->session_negotiate->encrypt_then_mac == MBEDTLS_SSL_ETM_DISABLED ||
ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 )
{
*olen = 0;
return;
}
/*
* RFC 7366: "If a server receives an encrypt-then-MAC request extension
* from a client and then selects a stream or Authenticated Encryption
* with Associated Data (AEAD) ciphersuite, it MUST NOT send an
* encrypt-then-MAC response extension back to the client."
*/
if( ( suite = mbedtls_ssl_ciphersuite_from_id(
ssl->session_negotiate->ciphersuite ) ) == NULL ||
( cipher = mbedtls_cipher_info_from_type( suite->cipher ) ) == NULL ||
cipher->mode != MBEDTLS_MODE_CBC )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding encrypt then mac extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ENCRYPT_THEN_MAC >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ENCRYPT_THEN_MAC ) & 0xFF );
*p++ = 0x00;
*p++ = 0x00;
*olen = 4;
}
#endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */
#if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET)
static void ssl_write_extended_ms_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
if( ssl->handshake->extended_ms == MBEDTLS_SSL_EXTENDED_MS_DISABLED ||
ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding extended master secret "
"extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_EXTENDED_MASTER_SECRET >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_EXTENDED_MASTER_SECRET ) & 0xFF );
*p++ = 0x00;
*p++ = 0x00;
*olen = 4;
}
#endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
static void ssl_write_session_ticket_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
if( ssl->handshake->new_session_ticket == 0 )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding session ticket extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_SESSION_TICKET >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_SESSION_TICKET ) & 0xFF );
*p++ = 0x00;
*p++ = 0x00;
*olen = 4;
}
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
static void ssl_write_renegotiation_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
if( ssl->secure_renegotiation != MBEDTLS_SSL_SECURE_RENEGOTIATION )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, secure renegotiation extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_RENEGOTIATION_INFO >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_RENEGOTIATION_INFO ) & 0xFF );
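    /*
     * The extension body is the renegotiated_connection field (RFC 5746):
     * during renegotiation it carries client_verify_data followed by
     * server_verify_data; on an initial handshake it is empty.
     */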
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
*p++ = 0x00;
*p++ = ( ssl->verify_data_len * 2 + 1 ) & 0xFF;
*p++ = ssl->verify_data_len * 2 & 0xFF;
memcpy( p, ssl->peer_verify_data, ssl->verify_data_len );
p += ssl->verify_data_len;
memcpy( p, ssl->own_verify_data, ssl->verify_data_len );
p += ssl->verify_data_len;
}
else
#endif /* MBEDTLS_SSL_RENEGOTIATION */
{
*p++ = 0x00;
*p++ = 0x01;
*p++ = 0x00;
}
*olen = p - buf;
}
#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH)
static void ssl_write_max_fragment_length_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
if( ssl->session_negotiate->mfl_code == MBEDTLS_SSL_MAX_FRAG_LEN_NONE )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, max_fragment_length extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_MAX_FRAGMENT_LENGTH >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_MAX_FRAGMENT_LENGTH ) & 0xFF );
*p++ = 0x00;
*p++ = 1;
*p++ = ssl->session_negotiate->mfl_code;
*olen = 5;
}
#endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \
defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
static void ssl_write_supported_point_formats_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
((void) ssl);
if( ( ssl->handshake->cli_exts &
MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS_PRESENT ) == 0 )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, supported_point_formats extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_SUPPORTED_POINT_FORMATS ) & 0xFF );
*p++ = 0x00;
*p++ = 2;
*p++ = 1;
*p++ = MBEDTLS_ECP_PF_UNCOMPRESSED;
*olen = 6;
}
#endif /* MBEDTLS_ECDH_C || MBEDTLS_ECDSA_C || MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
static void ssl_write_ecjpake_kkpp_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
unsigned char *p = buf;
const unsigned char *end = ssl->out_msg + MBEDTLS_SSL_OUT_CONTENT_LEN;
size_t kkpp_len;
*olen = 0;
/* Skip costly computation if not needed */
if( ssl->handshake->ciphersuite_info->key_exchange !=
MBEDTLS_KEY_EXCHANGE_ECJPAKE )
return;
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, ecjpake kkpp extension" ) );
if( end - p < 4 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "buffer too small" ) );
return;
}
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ECJPAKE_KKPP >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ECJPAKE_KKPP ) & 0xFF );
ret = mbedtls_ecjpake_write_round_one( &ssl->handshake->ecjpake_ctx,
p + 2, end - p - 2, &kkpp_len,
ssl->conf->f_rng, ssl->conf->p_rng );
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1 , "mbedtls_ecjpake_write_round_one", ret );
return;
}
*p++ = (unsigned char)( ( kkpp_len >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( kkpp_len ) & 0xFF );
*olen = kkpp_len + 4;
}
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
#if defined(MBEDTLS_SSL_ALPN )
static void ssl_write_alpn_ext( mbedtls_ssl_context *ssl,
unsigned char *buf, size_t *olen )
{
if( ssl->alpn_chosen == NULL )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding alpn extension" ) );
/*
* 0 . 1 ext identifier
* 2 . 3 ext length
* 4 . 5 protocol list length
* 6 . 6 protocol name length
* 7 . 7+n protocol name
*/
buf[0] = (unsigned char)( ( MBEDTLS_TLS_EXT_ALPN >> 8 ) & 0xFF );
buf[1] = (unsigned char)( ( MBEDTLS_TLS_EXT_ALPN ) & 0xFF );
*olen = 7 + strlen( ssl->alpn_chosen );
buf[2] = (unsigned char)( ( ( *olen - 4 ) >> 8 ) & 0xFF );
buf[3] = (unsigned char)( ( ( *olen - 4 ) ) & 0xFF );
buf[4] = (unsigned char)( ( ( *olen - 6 ) >> 8 ) & 0xFF );
buf[5] = (unsigned char)( ( ( *olen - 6 ) ) & 0xFF );
buf[6] = (unsigned char)( ( ( *olen - 7 ) ) & 0xFF );
memcpy( buf + 7, ssl->alpn_chosen, *olen - 7 );
}
#endif /* MBEDTLS_SSL_ALPN */
#if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY)
static int ssl_write_hello_verify_request( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
unsigned char *p = ssl->out_msg + 4;
unsigned char *cookie_len_byte;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write hello verify request" ) );
/*
* struct {
* ProtocolVersion server_version;
* opaque cookie<0..2^8-1>;
* } HelloVerifyRequest;
*/
/* The RFC is not clear on this point, but sending the actual negotiated
* version looks like the most interoperable thing to do. */
mbedtls_ssl_write_version( ssl->major_ver, ssl->minor_ver,
ssl->conf->transport, p );
MBEDTLS_SSL_DEBUG_BUF( 3, "server version", p, 2 );
p += 2;
/* If we get here, f_cookie_check is not null */
if( ssl->conf->f_cookie_write == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "inconsistent cookie callbacks" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
/* Skip length byte until we know the length */
cookie_len_byte = p++;
if( ( ret = ssl->conf->f_cookie_write( ssl->conf->p_cookie,
&p, ssl->out_buf + MBEDTLS_SSL_OUT_BUFFER_LEN,
ssl->cli_id, ssl->cli_id_len ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "f_cookie_write", ret );
return( ret );
}
*cookie_len_byte = (unsigned char)( p - ( cookie_len_byte + 1 ) );
MBEDTLS_SSL_DEBUG_BUF( 3, "cookie sent", cookie_len_byte + 1, *cookie_len_byte );
ssl->out_msglen = p - ssl->out_msg;
ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
ssl->out_msg[0] = MBEDTLS_SSL_HS_HELLO_VERIFY_REQUEST;
ssl->state = MBEDTLS_SSL_SERVER_HELLO_VERIFY_REQUEST_SENT;
if( ( ret = mbedtls_ssl_write_handshake_msg( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_handshake_msg", ret );
return( ret );
}
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
( ret = mbedtls_ssl_flight_transmit( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_flight_transmit", ret );
return( ret );
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write hello verify request" ) );
return( 0 );
}
#endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */
static int ssl_write_server_hello( mbedtls_ssl_context *ssl )
{
#if defined(MBEDTLS_HAVE_TIME)
mbedtls_time_t t;
#endif
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t olen, ext_len = 0, n;
unsigned char *buf, *p;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write server hello" ) );
#if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
ssl->handshake->verify_cookie_len != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "client hello was not authenticated" ) );
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server hello" ) );
return( ssl_write_hello_verify_request( ssl ) );
}
#endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */
if( ssl->conf->f_rng == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "no RNG provided") );
return( MBEDTLS_ERR_SSL_NO_RNG );
}
/*
* 0 . 0 handshake type
* 1 . 3 handshake length
* 4 . 5 protocol version
* 6 . 9 UNIX time()
* 10 . 37 random bytes
*/
buf = ssl->out_msg;
p = buf + 4;
mbedtls_ssl_write_version( ssl->major_ver, ssl->minor_ver,
ssl->conf->transport, p );
p += 2;
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, chosen version: [%d:%d]",
buf[4], buf[5] ) );
#if defined(MBEDTLS_HAVE_TIME)
t = mbedtls_time( NULL );
*p++ = (unsigned char)( t >> 24 );
*p++ = (unsigned char)( t >> 16 );
*p++ = (unsigned char)( t >> 8 );
*p++ = (unsigned char)( t );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, current time: %lu", t ) );
#else
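    /* Without a time source, fill the gmt_unix_time field with random
     * bytes as well. */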
if( ( ret = ssl->conf->f_rng( ssl->conf->p_rng, p, 4 ) ) != 0 )
return( ret );
p += 4;
#endif /* MBEDTLS_HAVE_TIME */
if( ( ret = ssl->conf->f_rng( ssl->conf->p_rng, p, 28 ) ) != 0 )
return( ret );
p += 28;
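    /* Keep a copy of the full 32-byte server random (gmt_unix_time +
     * 28 random bytes) after the client random for later key derivation. */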
memcpy( ssl->handshake->randbytes + 32, buf + 6, 32 );
MBEDTLS_SSL_DEBUG_BUF( 3, "server hello, random bytes", buf + 6, 32 );
/*
* Resume is 0 by default, see ssl_handshake_init().
* It may be already set to 1 by ssl_parse_session_ticket_ext().
* If not, try looking up session ID in our cache.
*/
if( ssl->handshake->resume == 0 &&
#if defined(MBEDTLS_SSL_RENEGOTIATION)
ssl->renego_status == MBEDTLS_SSL_INITIAL_HANDSHAKE &&
#endif
ssl->session_negotiate->id_len != 0 &&
ssl->conf->f_get_cache != NULL &&
ssl->conf->f_get_cache( ssl->conf->p_cache, ssl->session_negotiate ) == 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "session successfully restored from cache" ) );
ssl->handshake->resume = 1;
}
if( ssl->handshake->resume == 0 )
{
/*
* New session, create a new session id,
* unless we're about to issue a session ticket
*/
ssl->state++;
#if defined(MBEDTLS_HAVE_TIME)
ssl->session_negotiate->start = mbedtls_time( NULL );
#endif
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
if( ssl->handshake->new_session_ticket != 0 )
{
ssl->session_negotiate->id_len = n = 0;
memset( ssl->session_negotiate->id, 0, 32 );
}
else
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
{
ssl->session_negotiate->id_len = n = 32;
if( ( ret = ssl->conf->f_rng( ssl->conf->p_rng, ssl->session_negotiate->id,
n ) ) != 0 )
return( ret );
}
}
else
{
/*
* Resuming a session
*/
n = ssl->session_negotiate->id_len;
ssl->state = MBEDTLS_SSL_SERVER_CHANGE_CIPHER_SPEC;
if( ( ret = mbedtls_ssl_derive_keys( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_derive_keys", ret );
return( ret );
}
}
/*
* 38 . 38 session id length
* 39 . 38+n session id
* 39+n . 40+n chosen ciphersuite
* 41+n . 41+n chosen compression alg.
* 42+n . 43+n extensions length
* 44+n . 43+n+m extensions
*/
*p++ = (unsigned char) ssl->session_negotiate->id_len;
memcpy( p, ssl->session_negotiate->id, ssl->session_negotiate->id_len );
p += ssl->session_negotiate->id_len;
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, session id len.: %d", n ) );
MBEDTLS_SSL_DEBUG_BUF( 3, "server hello, session id", buf + 39, n );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "%s session has been resumed",
ssl->handshake->resume ? "a" : "no" ) );
*p++ = (unsigned char)( ssl->session_negotiate->ciphersuite >> 8 );
*p++ = (unsigned char)( ssl->session_negotiate->ciphersuite );
*p++ = (unsigned char)( ssl->session_negotiate->compression );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, chosen ciphersuite: %s",
mbedtls_ssl_get_ciphersuite_name( ssl->session_negotiate->ciphersuite ) ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, compress alg.: 0x%02X",
ssl->session_negotiate->compression ) );
/* Do not write the extensions if the protocol is SSLv3 */
#if defined(MBEDTLS_SSL_PROTO_SSL3)
if( ( ssl->major_ver != 3 ) || ( ssl->minor_ver != 0 ) )
{
#endif
/*
* First write extensions, then the total length
*/
ssl_write_renegotiation_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH)
ssl_write_max_fragment_length_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#endif
#if defined(MBEDTLS_SSL_TRUNCATED_HMAC)
ssl_write_truncated_hmac_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#endif
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
ssl_write_cid_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#endif
#if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC)
ssl_write_encrypt_then_mac_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#endif
#if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET)
ssl_write_extended_ms_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#endif
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
ssl_write_session_ticket_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#endif
#if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \
defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
if ( mbedtls_ssl_ciphersuite_uses_ec(
mbedtls_ssl_ciphersuite_from_id( ssl->session_negotiate->ciphersuite ) ) )
{
ssl_write_supported_point_formats_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
}
#endif
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
ssl_write_ecjpake_kkpp_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#endif
#if defined(MBEDTLS_SSL_ALPN)
ssl_write_alpn_ext( ssl, p + 2 + ext_len, &olen );
ext_len += olen;
#endif
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, total extension length: %d", ext_len ) );
if( ext_len > 0 )
{
*p++ = (unsigned char)( ( ext_len >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( ext_len ) & 0xFF );
p += ext_len;
}
#if defined(MBEDTLS_SSL_PROTO_SSL3)
}
#endif
ssl->out_msglen = p - buf;
ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
ssl->out_msg[0] = MBEDTLS_SSL_HS_SERVER_HELLO;
ret = mbedtls_ssl_write_handshake_msg( ssl );
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server hello" ) );
return( ret );
}
#if !defined(MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED)
static int ssl_write_certificate_request( mbedtls_ssl_context *ssl )
{
const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
ssl->handshake->ciphersuite_info;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write certificate request" ) );
if( !mbedtls_ssl_ciphersuite_cert_req_allowed( ciphersuite_info ) )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip write certificate request" ) );
ssl->state++;
return( 0 );
}
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
#else /* !MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */
static int ssl_write_certificate_request( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
ssl->handshake->ciphersuite_info;
uint16_t dn_size, total_dn_size; /* excluding length bytes */
size_t ct_len, sa_len; /* including length bytes */
unsigned char *buf, *p;
const unsigned char * const end = ssl->out_msg + MBEDTLS_SSL_OUT_CONTENT_LEN;
const mbedtls_x509_crt *crt;
int authmode;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write certificate request" ) );
ssl->state++;
#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
if( ssl->handshake->sni_authmode != MBEDTLS_SSL_VERIFY_UNSET )
authmode = ssl->handshake->sni_authmode;
else
#endif
authmode = ssl->conf->authmode;
if( !mbedtls_ssl_ciphersuite_cert_req_allowed( ciphersuite_info ) ||
authmode == MBEDTLS_SSL_VERIFY_NONE )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip write certificate request" ) );
return( 0 );
}
/*
* 0 . 0 handshake type
* 1 . 3 handshake length
* 4 . 4 cert type count
* 5 .. m-1 cert types
* m .. m+1 sig alg length (TLS 1.2 only)
* m+1 .. n-1 SignatureAndHashAlgorithms (TLS 1.2 only)
* n .. n+1 length of all DNs
* n+2 .. n+3 length of DN 1
* n+4 .. ... Distinguished Name #1
* ... .. ... length of DN 2, etc.
*/
buf = ssl->out_msg;
p = buf + 4;
/*
* Supported certificate types
*
* ClientCertificateType certificate_types<1..2^8-1>;
* enum { (255) } ClientCertificateType;
*/
ct_len = 0;
#if defined(MBEDTLS_RSA_C)
p[1 + ct_len++] = MBEDTLS_SSL_CERT_TYPE_RSA_SIGN;
#endif
#if defined(MBEDTLS_ECDSA_C)
p[1 + ct_len++] = MBEDTLS_SSL_CERT_TYPE_ECDSA_SIGN;
#endif
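    /* Write the number of certificate types, then count the length byte
     * itself in ct_len so that both p and the DN-length offsets computed
     * below skip over the count byte and the type list. */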
p[0] = (unsigned char) ct_len++;
p += ct_len;
sa_len = 0;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
/*
* Add signature_algorithms for verify (TLS 1.2)
*
* SignatureAndHashAlgorithm supported_signature_algorithms<2..2^16-2>;
*
* struct {
* HashAlgorithm hash;
* SignatureAlgorithm signature;
* } SignatureAndHashAlgorithm;
*
* enum { (255) } HashAlgorithm;
* enum { (255) } SignatureAlgorithm;
*/
if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
{
const int *cur;
/*
* Supported signature algorithms
*/
for( cur = ssl->conf->sig_hashes; *cur != MBEDTLS_MD_NONE; cur++ )
{
unsigned char hash = mbedtls_ssl_hash_from_md_alg( *cur );
if( MBEDTLS_SSL_HASH_NONE == hash || mbedtls_ssl_set_calc_verify_md( ssl, hash ) )
continue;
#if defined(MBEDTLS_RSA_C)
p[2 + sa_len++] = hash;
p[2 + sa_len++] = MBEDTLS_SSL_SIG_RSA;
#endif
#if defined(MBEDTLS_ECDSA_C)
p[2 + sa_len++] = hash;
p[2 + sa_len++] = MBEDTLS_SSL_SIG_ECDSA;
#endif
}
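        /* Write the 2-byte list length, then include those two bytes in
         * sa_len so that p and the DN-length offsets skip over them too. */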
p[0] = (unsigned char)( sa_len >> 8 );
p[1] = (unsigned char)( sa_len );
sa_len += 2;
p += sa_len;
}
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 */
/*
* DistinguishedName certificate_authorities<0..2^16-1>;
* opaque DistinguishedName<1..2^16-1>;
*/
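    /* Reserve two bytes for the total DN list length; it is filled in
     * below, once total_dn_size is known. */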
p += 2;
total_dn_size = 0;
if( ssl->conf->cert_req_ca_list == MBEDTLS_SSL_CERT_REQ_CA_LIST_ENABLED )
{
/* NOTE: If trusted certificates are provisioned
* via a CA callback (configured through
* `mbedtls_ssl_conf_ca_cb()`, then the
* CertificateRequest is currently left empty. */
#if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION)
if( ssl->handshake->sni_ca_chain != NULL )
crt = ssl->handshake->sni_ca_chain;
else
#endif
crt = ssl->conf->ca_chain;
while( crt != NULL && crt->version != 0 )
{
/* It follows from RFC 5280 A.1 that this length
* can be represented in at most 11 bits. */
dn_size = (uint16_t) crt->subject_raw.len;
if( end < p || (size_t)( end - p ) < 2 + (size_t) dn_size )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "skipping CAs: buffer too short" ) );
break;
}
*p++ = (unsigned char)( dn_size >> 8 );
*p++ = (unsigned char)( dn_size );
memcpy( p, crt->subject_raw.p, dn_size );
p += dn_size;
MBEDTLS_SSL_DEBUG_BUF( 3, "requested DN", p - dn_size, dn_size );
total_dn_size += 2 + dn_size;
crt = crt->next;
}
}
ssl->out_msglen = p - buf;
ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
ssl->out_msg[0] = MBEDTLS_SSL_HS_CERTIFICATE_REQUEST;
ssl->out_msg[4 + ct_len + sa_len] = (unsigned char)( total_dn_size >> 8 );
ssl->out_msg[5 + ct_len + sa_len] = (unsigned char)( total_dn_size );
ret = mbedtls_ssl_write_handshake_msg( ssl );
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write certificate request" ) );
return( ret );
}
#endif /* MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED)
static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
if( ! mbedtls_pk_can_do( mbedtls_ssl_own_key( ssl ), MBEDTLS_PK_ECKEY ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "server key not ECDH capable" ) );
return( MBEDTLS_ERR_SSL_PK_TYPE_MISMATCH );
}
if( ( ret = mbedtls_ecdh_get_params( &ssl->handshake->ecdh_ctx,
mbedtls_pk_ec( *mbedtls_ssl_own_key( ssl ) ),
MBEDTLS_ECDH_OURS ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "mbedtls_ecdh_get_params" ), ret );
return( ret );
}
return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) ||
MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) && \
defined(MBEDTLS_SSL_ASYNC_PRIVATE)
static int ssl_resume_server_key_exchange( mbedtls_ssl_context *ssl,
size_t *signature_len )
{
/* Append the signature to ssl->out_msg, leaving 2 bytes for the
* signature length which will be added in ssl_write_server_key_exchange
* after the call to ssl_prepare_server_key_exchange.
* ssl_write_server_key_exchange also takes care of incrementing
* ssl->out_msglen. */
unsigned char *sig_start = ssl->out_msg + ssl->out_msglen + 2;
size_t sig_max_len = ( ssl->out_buf + MBEDTLS_SSL_OUT_CONTENT_LEN
- sig_start );
int ret = ssl->conf->f_async_resume( ssl,
sig_start, signature_len, sig_max_len );
if( ret != MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS )
{
ssl->handshake->async_in_progress = 0;
mbedtls_ssl_set_async_operation_data( ssl, NULL );
}
MBEDTLS_SSL_DEBUG_RET( 2, "ssl_resume_server_key_exchange", ret );
return( ret );
}
#endif /* defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) &&
defined(MBEDTLS_SSL_ASYNC_PRIVATE) */
/* Prepare the ServerKeyExchange message, up to and including
* calculating the signature if any, but excluding formatting the
* signature and sending the message. */
static int ssl_prepare_server_key_exchange( mbedtls_ssl_context *ssl,
size_t *signature_len )
{
const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
ssl->handshake->ciphersuite_info;
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_PFS_ENABLED)
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
unsigned char *dig_signed = NULL;
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED */
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_PFS_ENABLED */
(void) ciphersuite_info; /* unused in some configurations */
#if !defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
(void) signature_len;
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED */
ssl->out_msglen = 4; /* header (type:1, length:3) to be written later */
/*
*
* Part 1: Provide key exchange parameters for chosen ciphersuite.
*
*/
/*
* - ECJPAKE key exchanges
*/
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECJPAKE )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t len = 0;
ret = mbedtls_ecjpake_write_round_two(
&ssl->handshake->ecjpake_ctx,
ssl->out_msg + ssl->out_msglen,
MBEDTLS_SSL_OUT_CONTENT_LEN - ssl->out_msglen, &len,
ssl->conf->f_rng, ssl->conf->p_rng );
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_write_round_two", ret );
return( ret );
}
ssl->out_msglen += len;
}
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
/*
     * For (EC)DHE key exchanges with PSK, parameters are prefixed by a
     * PSK identity hint (RFC 4279, Sec. 3). Until someone needs this
     * feature, we send an empty identity hint here.
     */
#if defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_PSK ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_PSK )
{
ssl->out_msg[ssl->out_msglen++] = 0x00;
ssl->out_msg[ssl->out_msglen++] = 0x00;
}
#endif /* MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED ||
MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED */
/*
* - DHE key exchanges
*/
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_DHE_ENABLED)
if( mbedtls_ssl_ciphersuite_uses_dhe( ciphersuite_info ) )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t len = 0;
if( ssl->conf->dhm_P.p == NULL || ssl->conf->dhm_G.p == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "no DH parameters set" ) );
return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
}
/*
* Ephemeral DH parameters:
*
* struct {
* opaque dh_p<1..2^16-1>;
* opaque dh_g<1..2^16-1>;
* opaque dh_Ys<1..2^16-1>;
* } ServerDHParams;
*/
if( ( ret = mbedtls_dhm_set_group( &ssl->handshake->dhm_ctx,
&ssl->conf->dhm_P,
&ssl->conf->dhm_G ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_dhm_set_group", ret );
return( ret );
}
if( ( ret = mbedtls_dhm_make_params(
&ssl->handshake->dhm_ctx,
(int) mbedtls_mpi_size( &ssl->handshake->dhm_ctx.P ),
ssl->out_msg + ssl->out_msglen, &len,
ssl->conf->f_rng, ssl->conf->p_rng ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_dhm_make_params", ret );
return( ret );
}
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
dig_signed = ssl->out_msg + ssl->out_msglen;
#endif
ssl->out_msglen += len;
MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: X ", &ssl->handshake->dhm_ctx.X );
MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: P ", &ssl->handshake->dhm_ctx.P );
MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: G ", &ssl->handshake->dhm_ctx.G );
MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: GX", &ssl->handshake->dhm_ctx.GX );
}
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_DHE_ENABLED */
/*
* - ECDHE key exchanges
*/
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_ECDHE_ENABLED)
if( mbedtls_ssl_ciphersuite_uses_ecdhe( ciphersuite_info ) )
{
/*
* Ephemeral ECDH parameters:
*
* struct {
* ECParameters curve_params;
* ECPoint public;
* } ServerECDHParams;
*/
const mbedtls_ecp_curve_info **curve = NULL;
const mbedtls_ecp_group_id *gid;
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t len = 0;
/* Match our preference list against the offered curves */
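        /* The goto below exits both nested loops at the first curve that
         * appears in both lists, so the order of conf->curve_list decides. */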
for( gid = ssl->conf->curve_list; *gid != MBEDTLS_ECP_DP_NONE; gid++ )
for( curve = ssl->handshake->curves; *curve != NULL; curve++ )
if( (*curve)->grp_id == *gid )
goto curve_matching_done;
curve_matching_done:
if( curve == NULL || *curve == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "no matching curve for ECDHE" ) );
return( MBEDTLS_ERR_SSL_NO_CIPHER_CHOSEN );
}
MBEDTLS_SSL_DEBUG_MSG( 2, ( "ECDHE curve: %s", (*curve)->name ) );
if( ( ret = mbedtls_ecdh_setup( &ssl->handshake->ecdh_ctx,
(*curve)->grp_id ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecp_group_load", ret );
return( ret );
}
if( ( ret = mbedtls_ecdh_make_params(
&ssl->handshake->ecdh_ctx, &len,
ssl->out_msg + ssl->out_msglen,
MBEDTLS_SSL_OUT_CONTENT_LEN - ssl->out_msglen,
ssl->conf->f_rng, ssl->conf->p_rng ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecdh_make_params", ret );
return( ret );
}
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
dig_signed = ssl->out_msg + ssl->out_msglen;
#endif
ssl->out_msglen += len;
MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx,
MBEDTLS_DEBUG_ECDH_Q );
}
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_ECDHE_ENABLED */
/*
*
* Part 2: For key exchanges involving the server signing the
* exchange parameters, compute and add the signature here.
*
*/
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
if( mbedtls_ssl_ciphersuite_uses_server_signature( ciphersuite_info ) )
{
size_t dig_signed_len = ssl->out_msg + ssl->out_msglen - dig_signed;
size_t hashlen = 0;
unsigned char hash[MBEDTLS_MD_MAX_SIZE];
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
/*
* 2.1: Choose hash algorithm:
* A: For TLS 1.2, obey signature-hash-algorithm extension
* to choose appropriate hash.
* B: For SSL3, TLS1.0, TLS1.1 and ECDHE_ECDSA, use SHA1
* (RFC 4492, Sec. 5.4)
* C: Otherwise, use MD5 + SHA1 (RFC 4346, Sec. 7.4.3)
*/
mbedtls_md_type_t md_alg;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
mbedtls_pk_type_t sig_alg =
mbedtls_ssl_get_ciphersuite_sig_pk_alg( ciphersuite_info );
if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
{
/* A: For TLS 1.2, obey signature-hash-algorithm extension
* (RFC 5246, Sec. 7.4.1.4.1). */
if( sig_alg == MBEDTLS_PK_NONE ||
( md_alg = mbedtls_ssl_sig_hash_set_find( &ssl->handshake->hash_algs,
sig_alg ) ) == MBEDTLS_MD_NONE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
/* (... because we choose a cipher suite
* only if there is a matching hash.) */
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
}
else
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 */
#if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \
defined(MBEDTLS_SSL_PROTO_TLS1_1)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA )
{
/* B: Default hash SHA1 */
md_alg = MBEDTLS_MD_SHA1;
}
else
#endif /* MBEDTLS_SSL_PROTO_SSL3 || MBEDTLS_SSL_PROTO_TLS1 || \
MBEDTLS_SSL_PROTO_TLS1_1 */
{
/* C: MD5 + SHA1 */
md_alg = MBEDTLS_MD_NONE;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "pick hash algorithm %d for signing", md_alg ) );
/*
* 2.2: Compute the hash to be signed
*/
#if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \
defined(MBEDTLS_SSL_PROTO_TLS1_1)
if( md_alg == MBEDTLS_MD_NONE )
{
hashlen = 36;
ret = mbedtls_ssl_get_key_exchange_md_ssl_tls( ssl, hash,
dig_signed,
dig_signed_len );
if( ret != 0 )
return( ret );
}
else
#endif /* MBEDTLS_SSL_PROTO_SSL3 || MBEDTLS_SSL_PROTO_TLS1 || \
MBEDTLS_SSL_PROTO_TLS1_1 */
#if defined(MBEDTLS_SSL_PROTO_TLS1) || defined(MBEDTLS_SSL_PROTO_TLS1_1) || \
defined(MBEDTLS_SSL_PROTO_TLS1_2)
if( md_alg != MBEDTLS_MD_NONE )
{
ret = mbedtls_ssl_get_key_exchange_md_tls1_2( ssl, hash, &hashlen,
dig_signed,
dig_signed_len,
md_alg );
if( ret != 0 )
return( ret );
}
else
#endif /* MBEDTLS_SSL_PROTO_TLS1 || MBEDTLS_SSL_PROTO_TLS1_1 || \
MBEDTLS_SSL_PROTO_TLS1_2 */
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "parameters hash", hash, hashlen );
/*
* 2.3: Compute and add the signature
*/
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
{
/*
* For TLS 1.2, we need to specify signature and hash algorithm
* explicitly through a prefix to the signature.
*
* struct {
* HashAlgorithm hash;
* SignatureAlgorithm signature;
* } SignatureAndHashAlgorithm;
*
* struct {
* SignatureAndHashAlgorithm algorithm;
* opaque signature<0..2^16-1>;
* } DigitallySigned;
*
*/
ssl->out_msg[ssl->out_msglen++] =
mbedtls_ssl_hash_from_md_alg( md_alg );
ssl->out_msg[ssl->out_msglen++] =
mbedtls_ssl_sig_from_pk_alg( sig_alg );
}
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 */
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
if( ssl->conf->f_async_sign_start != NULL )
{
ret = ssl->conf->f_async_sign_start( ssl,
mbedtls_ssl_own_cert( ssl ),
md_alg, hash, hashlen );
switch( ret )
{
case MBEDTLS_ERR_SSL_HW_ACCEL_FALLTHROUGH:
/* act as if f_async_sign was null */
break;
case 0:
ssl->handshake->async_in_progress = 1;
return( ssl_resume_server_key_exchange( ssl, signature_len ) );
case MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS:
ssl->handshake->async_in_progress = 1;
return( MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS );
default:
MBEDTLS_SSL_DEBUG_RET( 1, "f_async_sign_start", ret );
return( ret );
}
}
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */
if( mbedtls_ssl_own_key( ssl ) == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no private key" ) );
return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
}
/* Append the signature to ssl->out_msg, leaving 2 bytes for the
* signature length which will be added in ssl_write_server_key_exchange
* after the call to ssl_prepare_server_key_exchange.
* ssl_write_server_key_exchange also takes care of incrementing
* ssl->out_msglen. */
if( ( ret = mbedtls_pk_sign( mbedtls_ssl_own_key( ssl ),
md_alg, hash, hashlen,
ssl->out_msg + ssl->out_msglen + 2,
signature_len,
ssl->conf->f_rng,
ssl->conf->p_rng ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_pk_sign", ret );
return( ret );
}
}
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED */
return( 0 );
}
/* Prepare the ServerKeyExchange message and send it. For ciphersuites
* that do not include a ServerKeyExchange message, do nothing. Either
* way, if successful, move on to the next step in the SSL state
* machine. */
static int ssl_write_server_key_exchange( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t signature_len = 0;
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_NON_PFS_ENABLED)
const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
ssl->handshake->ciphersuite_info;
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_NON_PFS_ENABLED */
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write server key exchange" ) );
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_NON_PFS_ENABLED)
/* Extract static ECDH parameters and abort if ServerKeyExchange
* is not needed. */
if( mbedtls_ssl_ciphersuite_no_pfs( ciphersuite_info ) )
{
/* For suites involving ECDH, extract DH parameters
* from certificate at this point. */
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_ECDH_ENABLED)
if( mbedtls_ssl_ciphersuite_uses_ecdh( ciphersuite_info ) )
{
ssl_get_ecdh_params_from_cert( ssl );
}
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_ECDH_ENABLED */
/* Key exchanges not involving ephemeral keys don't use
* ServerKeyExchange, so end here. */
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip write server key exchange" ) );
ssl->state++;
return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_NON_PFS_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) && \
defined(MBEDTLS_SSL_ASYNC_PRIVATE)
/* If we have already prepared the message and there is an ongoing
* signature operation, resume signing. */
if( ssl->handshake->async_in_progress != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "resuming signature operation" ) );
ret = ssl_resume_server_key_exchange( ssl, &signature_len );
}
else
#endif /* defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) &&
defined(MBEDTLS_SSL_ASYNC_PRIVATE) */
{
/* ServerKeyExchange is needed. Prepare the message. */
ret = ssl_prepare_server_key_exchange( ssl, &signature_len );
}
if( ret != 0 )
{
/* If we're starting to write a new message, set ssl->out_msglen
* to 0. But if we're resuming after an asynchronous message,
         * out_msglen is the amount of data written so far and must be
* preserved. */
if( ret == MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS )
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server key exchange (pending)" ) );
else
ssl->out_msglen = 0;
return( ret );
}
/* If there is a signature, write its length.
* ssl_prepare_server_key_exchange already wrote the signature
* itself at its proper place in the output buffer. */
#if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED)
if( signature_len != 0 )
{
ssl->out_msg[ssl->out_msglen++] = (unsigned char)( signature_len >> 8 );
ssl->out_msg[ssl->out_msglen++] = (unsigned char)( signature_len );
MBEDTLS_SSL_DEBUG_BUF( 3, "my signature",
ssl->out_msg + ssl->out_msglen,
signature_len );
/* Skip over the already-written signature */
ssl->out_msglen += signature_len;
}
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED */
/* Add header and send. */
ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
ssl->out_msg[0] = MBEDTLS_SSL_HS_SERVER_KEY_EXCHANGE;
ssl->state++;
if( ( ret = mbedtls_ssl_write_handshake_msg( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_handshake_msg", ret );
return( ret );
}
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server key exchange" ) );
return( 0 );
}
static int ssl_write_server_hello_done( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write server hello done" ) );
ssl->out_msglen = 4;
ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
ssl->out_msg[0] = MBEDTLS_SSL_HS_SERVER_HELLO_DONE;
ssl->state++;
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
mbedtls_ssl_send_flight_completed( ssl );
#endif
if( ( ret = mbedtls_ssl_write_handshake_msg( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_handshake_msg", ret );
return( ret );
}
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
( ret = mbedtls_ssl_flight_transmit( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_flight_transmit", ret );
return( ret );
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write server hello done" ) );
return( 0 );
}
#if defined(MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED)
static int ssl_parse_client_dh_public( mbedtls_ssl_context *ssl, unsigned char **p,
const unsigned char *end )
{
int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE;
size_t n;
/*
* Receive G^Y mod P, premaster = (G^Y)^X mod P
*/
if( *p + 2 > end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
n = ( (*p)[0] << 8 ) | (*p)[1];
*p += 2;
if( *p + n > end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ( ret = mbedtls_dhm_read_public( &ssl->handshake->dhm_ctx, *p, n ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_dhm_read_public", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_RP );
}
*p += n;
MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: GY", &ssl->handshake->dhm_ctx.GY );
return( ret );
}
#endif /* MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED)
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
static int ssl_resume_decrypt_pms( mbedtls_ssl_context *ssl,
unsigned char *peer_pms,
size_t *peer_pmslen,
size_t peer_pmssize )
{
int ret = ssl->conf->f_async_resume( ssl,
peer_pms, peer_pmslen, peer_pmssize );
if( ret != MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS )
{
ssl->handshake->async_in_progress = 0;
mbedtls_ssl_set_async_operation_data( ssl, NULL );
}
MBEDTLS_SSL_DEBUG_RET( 2, "ssl_decrypt_encrypted_pms", ret );
return( ret );
}
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */
static int ssl_decrypt_encrypted_pms( mbedtls_ssl_context *ssl,
const unsigned char *p,
const unsigned char *end,
unsigned char *peer_pms,
size_t *peer_pmslen,
size_t peer_pmssize )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
mbedtls_pk_context *private_key = mbedtls_ssl_own_key( ssl );
mbedtls_pk_context *public_key = &mbedtls_ssl_own_cert( ssl )->pk;
size_t len = mbedtls_pk_get_len( public_key );
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
/* If we have already started decoding the message and there is an ongoing
     * decryption operation, resume that decryption. */
if( ssl->handshake->async_in_progress != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "resuming decryption operation" ) );
return( ssl_resume_decrypt_pms( ssl,
peer_pms, peer_pmslen, peer_pmssize ) );
}
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */
/*
* Prepare to decrypt the premaster using own private RSA key
*/
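    /* In TLS (unlike SSLv3) the EncryptedPreMasterSecret is preceded by a
     * two-byte length field, which must match the size of our RSA key. */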
#if defined(MBEDTLS_SSL_PROTO_TLS1) || defined(MBEDTLS_SSL_PROTO_TLS1_1) || \
defined(MBEDTLS_SSL_PROTO_TLS1_2)
if( ssl->minor_ver != MBEDTLS_SSL_MINOR_VERSION_0 )
{
if ( p + 2 > end ) {
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( *p++ != ( ( len >> 8 ) & 0xFF ) ||
*p++ != ( ( len ) & 0xFF ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
}
#endif
if( p + len != end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
/*
* Decrypt the premaster secret
*/
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
if( ssl->conf->f_async_decrypt_start != NULL )
{
ret = ssl->conf->f_async_decrypt_start( ssl,
mbedtls_ssl_own_cert( ssl ),
p, len );
switch( ret )
{
case MBEDTLS_ERR_SSL_HW_ACCEL_FALLTHROUGH:
/* act as if f_async_decrypt_start was null */
break;
case 0:
ssl->handshake->async_in_progress = 1;
return( ssl_resume_decrypt_pms( ssl,
peer_pms,
peer_pmslen,
peer_pmssize ) );
case MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS:
ssl->handshake->async_in_progress = 1;
return( MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS );
default:
MBEDTLS_SSL_DEBUG_RET( 1, "f_async_decrypt_start", ret );
return( ret );
}
}
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */
if( ! mbedtls_pk_can_do( private_key, MBEDTLS_PK_RSA ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no RSA private key" ) );
return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
}
ret = mbedtls_pk_decrypt( private_key, p, len,
peer_pms, peer_pmslen, peer_pmssize,
ssl->conf->f_rng, ssl->conf->p_rng );
return( ret );
}
static int ssl_parse_encrypted_pms( mbedtls_ssl_context *ssl,
const unsigned char *p,
const unsigned char *end,
size_t pms_offset )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
unsigned char *pms = ssl->handshake->premaster + pms_offset;
unsigned char ver[2];
unsigned char fake_pms[48], peer_pms[48];
unsigned char mask;
size_t i, peer_pmslen;
unsigned int diff;
/* In case of a failure in decryption, the decryption may write less than
* 2 bytes of output, but we always read the first two bytes. It doesn't
* matter in the end because diff will be nonzero in that case due to
* peer_pmslen being less than 48, and we only care whether diff is 0.
* But do initialize peer_pms for robustness anyway. This also makes
* memory analyzers happy (don't access uninitialized memory, even
* if it's an unsigned char). */
peer_pms[0] = peer_pms[1] = ~0;
ret = ssl_decrypt_encrypted_pms( ssl, p, end,
peer_pms,
&peer_pmslen,
sizeof( peer_pms ) );
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
if ( ret == MBEDTLS_ERR_SSL_ASYNC_IN_PROGRESS )
return( ret );
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */
mbedtls_ssl_write_version( ssl->handshake->max_major_ver,
ssl->handshake->max_minor_ver,
ssl->conf->transport, ver );
/* Avoid data-dependent branches while checking for invalid
* padding, to protect against timing-based Bleichenbacher-type
* attacks. */
diff = (unsigned int) ret;
diff |= peer_pmslen ^ 48;
diff |= peer_pms[0] ^ ver[0];
diff |= peer_pms[1] ^ ver[1];
/* mask = diff ? 0xff : 0x00 using bit operations to avoid branches */
/* MSVC has a warning about unary minus on unsigned, but this is
* well-defined and precisely what we want to do here */
#if defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4146 )
#endif
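    /* For any nonzero diff, ( diff | -diff ) has its top bit set, so the
     * shift yields 1 and the negation produces 0xFF; for diff == 0 the
     * result is 0x00. */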
mask = - ( ( diff | - diff ) >> ( sizeof( unsigned int ) * 8 - 1 ) );
#if defined(_MSC_VER)
#pragma warning( pop )
#endif
/*
* Protection against Bleichenbacher's attack: invalid PKCS#1 v1.5 padding
* must not cause the connection to end immediately; instead, send a
* bad_record_mac later in the handshake.
* To protect against timing-based variants of the attack, we must
* not have any branch that depends on whether the decryption was
* successful. In particular, always generate the fake premaster secret,
* regardless of whether it will ultimately influence the output or not.
*/
ret = ssl->conf->f_rng( ssl->conf->p_rng, fake_pms, sizeof( fake_pms ) );
if( ret != 0 )
{
/* It's ok to abort on an RNG failure, since this does not reveal
* anything about the RSA decryption. */
return( ret );
}
#if defined(MBEDTLS_SSL_DEBUG_ALL)
if( diff != 0 )
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
#endif
if( sizeof( ssl->handshake->premaster ) < pms_offset ||
sizeof( ssl->handshake->premaster ) - pms_offset < 48 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
ssl->handshake->pmslen = 48;
/* Set pms to either the true or the fake PMS, without
* data-dependent branches. */
for( i = 0; i < ssl->handshake->pmslen; i++ )
pms[i] = ( mask & fake_pms[i] ) | ( (~mask) & peer_pms[i] );
return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_RSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED)
static int ssl_parse_client_psk_identity( mbedtls_ssl_context *ssl, unsigned char **p,
const unsigned char *end )
{
int ret = 0;
uint16_t n;
if( ssl_conf_has_psk_or_cb( ssl->conf ) == 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no pre-shared key" ) );
return( MBEDTLS_ERR_SSL_PRIVATE_KEY_REQUIRED );
}
/*
* Receive client pre-shared key identity name
*/
if( end - *p < 2 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
n = ( (*p)[0] << 8 ) | (*p)[1];
*p += 2;
if( n == 0 || n > end - *p )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ssl->conf->f_psk != NULL )
{
if( ssl->conf->f_psk( ssl->conf->p_psk, ssl, *p, n ) != 0 )
ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
}
else
{
/* Identity is not a big secret since clients send it in the clear,
* but treat it carefully anyway, just in case */
if( n != ssl->conf->psk_identity_len ||
mbedtls_ssl_safer_memcmp( ssl->conf->psk_identity, *p, n ) != 0 )
{
ret = MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY;
}
}
if( ret == MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY )
{
MBEDTLS_SSL_DEBUG_BUF( 3, "Unknown PSK identity", *p, n );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_UNKNOWN_PSK_IDENTITY );
return( MBEDTLS_ERR_SSL_UNKNOWN_IDENTITY );
}
*p += n;
return( 0 );
}
#endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */
static int ssl_parse_client_key_exchange( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info;
unsigned char *p, *end;
ciphersuite_info = ssl->handshake->ciphersuite_info;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse client key exchange" ) );
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE) && \
( defined(MBEDTLS_KEY_EXCHANGE_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED) )
if( ( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA_PSK ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA ) &&
( ssl->handshake->async_in_progress != 0 ) )
{
/* We've already read a record and there is an asynchronous
* operation in progress to decrypt it. So skip reading the
* record. */
MBEDTLS_SSL_DEBUG_MSG( 3, ( "will resume decryption of previously-read record" ) );
}
else
#endif
if( ( ret = mbedtls_ssl_read_record( ssl, 1 ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_read_record", ret );
return( ret );
}
p = ssl->in_msg + mbedtls_ssl_hs_hdr_len( ssl );
end = ssl->in_msg + ssl->in_hslen;
if( ssl->in_msgtype != MBEDTLS_SSL_MSG_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ssl->in_msg[0] != MBEDTLS_SSL_HS_CLIENT_KEY_EXCHANGE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
#if defined(MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_RSA )
{
if( ( ret = ssl_parse_client_dh_public( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_dh_public" ), ret );
return( ret );
}
if( p != end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ( ret = mbedtls_dhm_calc_secret( &ssl->handshake->dhm_ctx,
ssl->handshake->premaster,
MBEDTLS_PREMASTER_SIZE,
&ssl->handshake->pmslen,
ssl->conf->f_rng, ssl->conf->p_rng ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_dhm_calc_secret", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_CS );
}
MBEDTLS_SSL_DEBUG_MPI( 3, "DHM: K ", &ssl->handshake->dhm_ctx.K );
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || \
defined(MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_RSA ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDH_RSA ||
ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA )
{
if( ( ret = mbedtls_ecdh_read_public( &ssl->handshake->ecdh_ctx,
p, end - p) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecdh_read_public", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_RP );
}
MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx,
MBEDTLS_DEBUG_ECDH_QP );
if( ( ret = mbedtls_ecdh_calc_secret( &ssl->handshake->ecdh_ctx,
&ssl->handshake->pmslen,
ssl->handshake->premaster,
MBEDTLS_MPI_MAX_SIZE,
ssl->conf->f_rng, ssl->conf->p_rng ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecdh_calc_secret", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_CS );
}
MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx,
MBEDTLS_DEBUG_ECDH_Z );
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED ||
MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_PSK )
{
if( ( ret = ssl_parse_client_psk_identity( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_psk_identity" ), ret );
return( ret );
}
if( p != end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
        /* For opaque PSKs, we perform the PSK-to-MS derivation automatically
* and skip the intermediate PMS. */
if( ssl_use_opaque_psk( ssl ) == 1 )
MBEDTLS_SSL_DEBUG_MSG( 1, ( "skip PMS generation for opaque PSK" ) );
else
#endif /* MBEDTLS_USE_PSA_CRYPTO */
if( ( ret = mbedtls_ssl_psk_derive_premaster( ssl,
ciphersuite_info->key_exchange ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_psk_derive_premaster", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA_PSK )
{
#if defined(MBEDTLS_SSL_ASYNC_PRIVATE)
if ( ssl->handshake->async_in_progress != 0 )
{
/* There is an asynchronous operation in progress to
* decrypt the encrypted premaster secret, so skip
* directly to resuming this operation. */
MBEDTLS_SSL_DEBUG_MSG( 3, ( "PSK identity already parsed" ) );
/* Update p to skip the PSK identity. ssl_parse_encrypted_pms
* won't actually use it, but maintain p anyway for robustness. */
p += ssl->conf->psk_identity_len + 2;
}
else
#endif /* MBEDTLS_SSL_ASYNC_PRIVATE */
if( ( ret = ssl_parse_client_psk_identity( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_psk_identity" ), ret );
return( ret );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
/* Opaque PSKs are currently only supported for PSK-only. */
if( ssl_use_opaque_psk( ssl ) == 1 )
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
#endif
if( ( ret = ssl_parse_encrypted_pms( ssl, p, end, 2 ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_encrypted_pms" ), ret );
return( ret );
}
if( ( ret = mbedtls_ssl_psk_derive_premaster( ssl,
ciphersuite_info->key_exchange ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_psk_derive_premaster", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_DHE_PSK )
{
if( ( ret = ssl_parse_client_psk_identity( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_psk_identity" ), ret );
return( ret );
}
if( ( ret = ssl_parse_client_dh_public( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_dh_public" ), ret );
return( ret );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
/* Opaque PSKs are currently only supported for PSK-only. */
if( ssl_use_opaque_psk( ssl ) == 1 )
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
#endif
if( p != end )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client key exchange" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE );
}
if( ( ret = mbedtls_ssl_psk_derive_premaster( ssl,
ciphersuite_info->key_exchange ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_psk_derive_premaster", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_PSK )
{
if( ( ret = ssl_parse_client_psk_identity( ssl, &p, end ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_client_psk_identity" ), ret );
return( ret );
}
if( ( ret = mbedtls_ecdh_read_public( &ssl->handshake->ecdh_ctx,
p, end - p ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecdh_read_public", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_KEY_EXCHANGE_RP );
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
/* Opaque PSKs are currently only supported for PSK-only. */
if( ssl_use_opaque_psk( ssl ) == 1 )
return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE );
#endif
MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx,
MBEDTLS_DEBUG_ECDH_QP );
if( ( ret = mbedtls_ssl_psk_derive_premaster( ssl,
ciphersuite_info->key_exchange ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_psk_derive_premaster", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_RSA_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_RSA )
{
if( ( ret = ssl_parse_encrypted_pms( ssl, p, end, 0 ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "ssl_parse_parse_encrypted_pms_secret" ), ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_RSA_ENABLED */
#if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED)
if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECJPAKE )
{
ret = mbedtls_ecjpake_read_round_two( &ssl->handshake->ecjpake_ctx,
p, end - p );
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_read_round_two", ret );
return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE );
}
ret = mbedtls_ecjpake_derive_secret( &ssl->handshake->ecjpake_ctx,
ssl->handshake->premaster, 32, &ssl->handshake->pmslen,
ssl->conf->f_rng, ssl->conf->p_rng );
if( ret != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_derive_secret", ret );
return( ret );
}
}
else
#endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
if( ( ret = mbedtls_ssl_derive_keys( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_derive_keys", ret );
return( ret );
}
ssl->state++;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse client key exchange" ) );
return( 0 );
}
#if !defined(MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED)
static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl )
{
const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
ssl->handshake->ciphersuite_info;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse certificate verify" ) );
if( !mbedtls_ssl_ciphersuite_cert_req_allowed( ciphersuite_info ) )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse certificate verify" ) );
ssl->state++;
return( 0 );
}
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
#else /* !MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */
static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE;
size_t i, sig_len;
unsigned char hash[48];
unsigned char *hash_start = hash;
size_t hashlen;
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
mbedtls_pk_type_t pk_alg;
#endif
mbedtls_md_type_t md_alg;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info =
ssl->handshake->ciphersuite_info;
mbedtls_pk_context * peer_pk;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse certificate verify" ) );
if( !mbedtls_ssl_ciphersuite_cert_req_allowed( ciphersuite_info ) )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse certificate verify" ) );
ssl->state++;
return( 0 );
}
#if defined(MBEDTLS_SSL_KEEP_PEER_CERTIFICATE)
if( ssl->session_negotiate->peer_cert == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse certificate verify" ) );
ssl->state++;
return( 0 );
}
#else /* MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */
if( ssl->session_negotiate->peer_cert_digest == NULL )
{
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= skip parse certificate verify" ) );
ssl->state++;
return( 0 );
}
#endif /* !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */
/* Read the message without adding it to the checksum */
ret = mbedtls_ssl_read_record( ssl, 0 /* no checksum update */ );
if( 0 != ret )
{
MBEDTLS_SSL_DEBUG_RET( 1, ( "mbedtls_ssl_read_record" ), ret );
return( ret );
}
ssl->state++;
/* Process the message contents */
if( ssl->in_msgtype != MBEDTLS_SSL_MSG_HANDSHAKE ||
ssl->in_msg[0] != MBEDTLS_SSL_HS_CERTIFICATE_VERIFY )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad certificate verify message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
}
i = mbedtls_ssl_hs_hdr_len( ssl );
#if !defined(MBEDTLS_SSL_KEEP_PEER_CERTIFICATE)
peer_pk = &ssl->handshake->peer_pubkey;
#else /* !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */
if( ssl->session_negotiate->peer_cert == NULL )
{
/* Should never happen */
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
peer_pk = &ssl->session_negotiate->peer_cert->pk;
#endif /* MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */
/*
* struct {
* SignatureAndHashAlgorithm algorithm; -- TLS 1.2 only
* opaque signature<0..2^16-1>;
* } DigitallySigned;
*/
#if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \
defined(MBEDTLS_SSL_PROTO_TLS1_1)
if( ssl->minor_ver != MBEDTLS_SSL_MINOR_VERSION_3 )
{
md_alg = MBEDTLS_MD_NONE;
hashlen = 36;
/* For ECDSA, use SHA-1, not MD-5 + SHA-1 */
if( mbedtls_pk_can_do( peer_pk, MBEDTLS_PK_ECDSA ) )
{
hash_start += 16;
hashlen -= 16;
md_alg = MBEDTLS_MD_SHA1;
}
}
else
#endif /* MBEDTLS_SSL_PROTO_SSL3 || MBEDTLS_SSL_PROTO_TLS1 ||
MBEDTLS_SSL_PROTO_TLS1_1 */
#if defined(MBEDTLS_SSL_PROTO_TLS1_2)
if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 )
{
if( i + 2 > ssl->in_hslen )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad certificate verify message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
}
/*
* Hash
*/
md_alg = mbedtls_ssl_md_alg_from_hash( ssl->in_msg[i] );
if( md_alg == MBEDTLS_MD_NONE || mbedtls_ssl_set_calc_verify_md( ssl, ssl->in_msg[i] ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "peer not adhering to requested sig_alg"
" for verify message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
}
#if !defined(MBEDTLS_MD_SHA1)
if( MBEDTLS_MD_SHA1 == md_alg )
hash_start += 16;
#endif
/* Info from md_alg will be used instead */
hashlen = 0;
i++;
/*
* Signature
*/
if( ( pk_alg = mbedtls_ssl_pk_alg_from_sig( ssl->in_msg[i] ) )
== MBEDTLS_PK_NONE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "peer not adhering to requested sig_alg"
" for verify message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
}
/*
* Check the certificate's key type matches the signature alg
*/
if( !mbedtls_pk_can_do( peer_pk, pk_alg ) )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "sig_alg doesn't match cert key" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
}
i++;
}
else
#endif /* MBEDTLS_SSL_PROTO_TLS1_2 */
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
}
if( i + 2 > ssl->in_hslen )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad certificate verify message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
}
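/* Signature length: a 2-byte big-endian value immediately preceding the signature. */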
sig_len = ( ssl->in_msg[i] << 8 ) | ssl->in_msg[i+1];
i += 2;
if( i + sig_len != ssl->in_hslen )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad certificate verify message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CERTIFICATE_VERIFY );
}
/* Calculate hash and verify signature */
{
size_t dummy_hlen;
ssl->handshake->calc_verify( ssl, hash, &dummy_hlen );
}
if( ( ret = mbedtls_pk_verify( peer_pk,
md_alg, hash_start, hashlen,
ssl->in_msg + i, sig_len ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_pk_verify", ret );
return( ret );
}
mbedtls_ssl_update_handshake_status( ssl );
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse certificate verify" ) );
return( ret );
}
#endif /* MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
static int ssl_write_new_session_ticket( mbedtls_ssl_context *ssl )
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t tlen;
uint32_t lifetime;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write new session ticket" ) );
ssl->out_msgtype = MBEDTLS_SSL_MSG_HANDSHAKE;
ssl->out_msg[0] = MBEDTLS_SSL_HS_NEW_SESSION_TICKET;
/*
* struct {
* uint32 ticket_lifetime_hint;
* opaque ticket<0..2^16-1>;
* } NewSessionTicket;
*
* 4 . 7 ticket_lifetime_hint (0 = unspecified)
* 8 . 9 ticket_len (n)
* 10 . 9+n ticket content
*/
if( ( ret = ssl->conf->f_ticket_write( ssl->conf->p_ticket,
ssl->session_negotiate,
ssl->out_msg + 10,
ssl->out_msg + MBEDTLS_SSL_OUT_CONTENT_LEN,
&tlen, &lifetime ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_ticket_write", ret );
tlen = 0;
}
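/* Serialize the 4-byte lifetime hint and the 2-byte ticket length in
* network byte order, per the layout described above. */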
ssl->out_msg[4] = ( lifetime >> 24 ) & 0xFF;
ssl->out_msg[5] = ( lifetime >> 16 ) & 0xFF;
ssl->out_msg[6] = ( lifetime >> 8 ) & 0xFF;
ssl->out_msg[7] = ( lifetime ) & 0xFF;
ssl->out_msg[8] = (unsigned char)( ( tlen >> 8 ) & 0xFF );
ssl->out_msg[9] = (unsigned char)( ( tlen ) & 0xFF );
ssl->out_msglen = 10 + tlen;
/*
* Morally equivalent to updating ssl->state, but NewSessionTicket and
* ChangeCipherSpec share the same state.
*/
ssl->handshake->new_session_ticket = 0;
if( ( ret = mbedtls_ssl_write_handshake_msg( ssl ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_handshake_msg", ret );
return( ret );
}
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= write new session ticket" ) );
return( 0 );
}
#endif /* MBEDTLS_SSL_SESSION_TICKETS */
/*
* SSL handshake -- server side -- single step
*/
int mbedtls_ssl_handshake_server_step( mbedtls_ssl_context *ssl )
{
int ret = 0;
if( ssl->state == MBEDTLS_SSL_HANDSHAKE_OVER || ssl->handshake == NULL )
return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
MBEDTLS_SSL_DEBUG_MSG( 2, ( "server state: %d", ssl->state ) );
if( ( ret = mbedtls_ssl_flush_output( ssl ) ) != 0 )
return( ret );
#if defined(MBEDTLS_SSL_PROTO_DTLS)
if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
ssl->handshake->retransmit_state == MBEDTLS_SSL_RETRANS_SENDING )
{
if( ( ret = mbedtls_ssl_flight_transmit( ssl ) ) != 0 )
return( ret );
}
#endif /* MBEDTLS_SSL_PROTO_DTLS */
switch( ssl->state )
{
case MBEDTLS_SSL_HELLO_REQUEST:
ssl->state = MBEDTLS_SSL_CLIENT_HELLO;
break;
/*
* <== ClientHello
*/
case MBEDTLS_SSL_CLIENT_HELLO:
ret = ssl_parse_client_hello( ssl );
break;
#if defined(MBEDTLS_SSL_PROTO_DTLS)
case MBEDTLS_SSL_SERVER_HELLO_VERIFY_REQUEST_SENT:
return( MBEDTLS_ERR_SSL_HELLO_VERIFY_REQUIRED );
#endif
/*
* ==> ServerHello
* Certificate
* ( ServerKeyExchange )
* ( CertificateRequest )
* ServerHelloDone
*/
case MBEDTLS_SSL_SERVER_HELLO:
ret = ssl_write_server_hello( ssl );
break;
case MBEDTLS_SSL_SERVER_CERTIFICATE:
ret = mbedtls_ssl_write_certificate( ssl );
break;
case MBEDTLS_SSL_SERVER_KEY_EXCHANGE:
ret = ssl_write_server_key_exchange( ssl );
break;
case MBEDTLS_SSL_CERTIFICATE_REQUEST:
ret = ssl_write_certificate_request( ssl );
break;
case MBEDTLS_SSL_SERVER_HELLO_DONE:
ret = ssl_write_server_hello_done( ssl );
break;
/*
* <== ( Certificate/Alert )
* ClientKeyExchange
* ( CertificateVerify )
* ChangeCipherSpec
* Finished
*/
case MBEDTLS_SSL_CLIENT_CERTIFICATE:
ret = mbedtls_ssl_parse_certificate( ssl );
break;
case MBEDTLS_SSL_CLIENT_KEY_EXCHANGE:
ret = ssl_parse_client_key_exchange( ssl );
break;
case MBEDTLS_SSL_CERTIFICATE_VERIFY:
ret = ssl_parse_certificate_verify( ssl );
break;
case MBEDTLS_SSL_CLIENT_CHANGE_CIPHER_SPEC:
ret = mbedtls_ssl_parse_change_cipher_spec( ssl );
break;
case MBEDTLS_SSL_CLIENT_FINISHED:
ret = mbedtls_ssl_parse_finished( ssl );
break;
/*
* ==> ( NewSessionTicket )
* ChangeCipherSpec
* Finished
*/
case MBEDTLS_SSL_SERVER_CHANGE_CIPHER_SPEC:
#if defined(MBEDTLS_SSL_SESSION_TICKETS)
if( ssl->handshake->new_session_ticket != 0 )
ret = ssl_write_new_session_ticket( ssl );
else
#endif
ret = mbedtls_ssl_write_change_cipher_spec( ssl );
break;
case MBEDTLS_SSL_SERVER_FINISHED:
ret = mbedtls_ssl_write_finished( ssl );
break;
case MBEDTLS_SSL_FLUSH_BUFFERS:
MBEDTLS_SSL_DEBUG_MSG( 2, ( "handshake: done" ) );
ssl->state = MBEDTLS_SSL_HANDSHAKE_WRAPUP;
break;
case MBEDTLS_SSL_HANDSHAKE_WRAPUP:
mbedtls_ssl_handshake_wrapup( ssl );
break;
default:
MBEDTLS_SSL_DEBUG_MSG( 1, ( "invalid state %d", ssl->state ) );
return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
}
return( ret );
}
#endif /* MBEDTLS_SSL_SRV_C */
| mc-server/polarssl | library/ssl_srv.c | C | gpl-2.0 | 158,375 |
/*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Ring initialization rules:
* 1. Each segment is initialized to zero, except for link TRBs.
* 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
* Consumer Cycle State (CCS), depending on ring function.
* 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
*
* Ring behavior rules:
* 1. A ring is empty if enqueue == dequeue. This means there will always be at
* least one free TRB in the ring. This is useful if you want to turn that
* into a link TRB and expand the ring.
* 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
* link TRB, then load the pointer with the address in the link TRB. If the
* link TRB had its toggle bit set, you may need to update the ring cycle
* state (see cycle bit rules). You may have to do this multiple times
* until you reach a non-link TRB.
* 3. A ring is full if enqueue++ (for the definition of increment above)
* equals the dequeue pointer.
*
* Cycle bit rules:
* 1. When a consumer increments a dequeue pointer and encounters a toggle bit
* in a link TRB, it must toggle the ring cycle state.
* 2. When a producer increments an enqueue pointer and encounters a toggle bit
* in a link TRB, it must toggle the ring cycle state.
*
* Producer rules:
* 1. Check if ring is full before you enqueue.
* 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
* Update enqueue pointer between each write (which may update the ring
* cycle state).
* 3. Notify consumer. If SW is producer, it rings the doorbell for command
*    and endpoint rings. If HC is the producer for the event ring, it
*    generates an interrupt according to interrupt modulation rules.
*
* Consumer rules:
* 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
* the TRB is owned by the consumer.
* 2. Update dequeue pointer (which may update the ring cycle state) and
* continue processing TRBs until you reach a TRB which is not owned by you.
* 3. Notify the producer. SW is the consumer for the event ring, and it
* updates event ring dequeue pointer. HC is the consumer for the command and
* endpoint rings; it generates events on the event ring for these.
*/
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct xhci_event_cmd *event);
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
* address of the TRB.
*/
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
union xhci_trb *trb)
{
unsigned long segment_offset;
if (!seg || !trb || trb < seg->trbs)
return 0;
/* offset in TRBs */
segment_offset = trb - seg->trbs;
if (segment_offset >= TRBS_PER_SEGMENT)
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
/* Does this link TRB point to the first segment in a ring,
* or was the previous TRB the last TRB on the last segment in the ERST?
*/
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
if (ring == xhci->event_ring)
return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
(seg->next == xhci->event_ring->first_seg);
else
return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
* segment? I.e. would the updated event TRB pointer step off the end of the
* event seg?
*/
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
if (ring == xhci->event_ring)
return trb == &seg->trbs[TRBS_PER_SEGMENT];
else
return TRB_TYPE_LINK_LE32(trb->link.control);
}
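/* Returns nonzero if the TRB at the ring's enqueue pointer is a link TRB. */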
static int enqueue_is_link_trb(struct xhci_ring *ring)
{
struct xhci_link_trb *link = &ring->enqueue->link;
return TRB_TYPE_LINK_LE32(link->control);
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* affect the ring dequeue or enqueue pointers.
*/
static void next_trb(struct xhci_hcd *xhci,
struct xhci_ring *ring,
struct xhci_segment **seg,
union xhci_trb **trb)
{
if (last_trb(xhci, ring, *seg, *trb)) {
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
(*trb)++;
}
}
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
*/
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
union xhci_trb *next;
unsigned long long addr;
ring->deq_updates++;
/* If this is not an event ring, there is one more usable TRB */
if (ring->type != TYPE_EVENT &&
!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
ring->num_trbs_free++;
next = ++(ring->dequeue);
/* Update the dequeue pointer further if that was a link TRB or we're at
* the end of an event ring segment (which doesn't have link TRBs)
*/
while (last_trb(xhci, ring, ring->deq_seg, next)) {
if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
ring, ring->deq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
}
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
next = ring->dequeue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
*
* If we've just enqueued a TRB that is in the middle of a TD (meaning the
* chain bit is set), then set the chain bit in all the following link TRBs.
* If we've enqueued the last TRB in a TD, make sure the following link TRBs
* have their chain bit cleared (so that each Link TRB is a separate TD).
*
* Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
* set, but other sections talk about dealing with the chain bit set. This was
* fixed in the 0.96 specification errata, but we have to assume that all 0.95
* xHCI hardware can't handle the chain bit being cleared on a link TRB.
*
* @more_trbs_coming: Will you enqueue more TRBs before calling
* prepare_transfer()?
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming)
{
u32 chain;
union xhci_trb *next;
unsigned long long addr;
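/* Remember the chain bit of the TRB just written at the enqueue slot;
* link TRBs crossed below may carry it over (see the quirk handling in
* the loop).
*/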
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not an event ring, there is one less usable TRB */
if (ring->type != TYPE_EVENT &&
!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
ring->num_trbs_free--;
next = ++(ring->enqueue);
ring->enq_updates++;
/* Update the enqueue pointer further if that was a link TRB or we're at
* the end of an event ring segment (which doesn't have link TRBs)
*/
while (last_trb(xhci, ring, ring->enq_seg, next)) {
if (ring->type != TYPE_EVENT) {
/*
* If the caller doesn't plan on enqueueing more
* TDs before ringing the doorbell, then we
* don't want to give the link TRB to the
* hardware just yet. We'll give the link TRB
* back in prepare_ring() just before we enqueue
* the TD at the top of the ring.
*/
if (!chain && !more_trbs_coming)
break;
/* If we're not dealing with 0.95 hardware or
* isoc rings on AMD 0.96 host,
* carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
if (!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST))
&& !xhci_link_trb_quirk(xhci)) {
next->link.control &=
cpu_to_le32(~TRB_CHAIN);
next->link.control |=
cpu_to_le32(chain);
}
/* Give this link TRB to the hardware */
wmb();
next->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
}
}
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}
/*
* Check to see if there's room to enqueue num_trbs on the ring and make sure
* enqueue pointer will not advance into dequeue segment. See rules above.
*/
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs)
{
int num_trbs_in_deq_seg;
if (ring->num_trbs_free < num_trbs)
return 0;
if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
return 0;
}
return 1;
}
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
xhci_dbg(xhci, "// Ding dong!\n");
xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index,
unsigned int stream_id)
{
__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
unsigned int ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
* cancellations because we don't want to interrupt processing.
* We don't want to restart any stream rings if there's a set dequeue
* pointer command pending because the device can choose to start any
* stream once the endpoint is on the HW schedule.
* FIXME - check all the stream rings for pending cancellations.
*/
if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED))
return;
xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
/* The CPU has better things to do at this point than wait for a
* write-posting flush. It'll get there soon enough.
*/
}
/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index)
{
unsigned int stream_id;
struct xhci_virt_ep *ep;
ep = &xhci->devs[slot_id]->eps[ep_index];
/* A ring has pending URBs if its TD list is not empty */
if (!(ep->ep_state & EP_HAS_STREAMS)) {
if (!(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
return;
}
for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
struct xhci_stream_info *stream_info = ep->stream_info;
if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
stream_id);
}
}
/*
* Find the segment that trb is in. Start searching in start_seg.
* If we must move past a segment that has a link TRB with a toggle cycle state
* bit set, then we will toggle the value pointed at by cycle_state.
*/
static struct xhci_segment *find_trb_seg(
struct xhci_segment *start_seg,
union xhci_trb *trb, int *cycle_state)
{
struct xhci_segment *cur_seg = start_seg;
struct xhci_generic_trb *generic_trb;
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
*cycle_state ^= 0x1;
cur_seg = cur_seg->next;
if (cur_seg == start_seg)
/* Looped over the entire list. Oops! */
return NULL;
}
return cur_seg;
}
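/* Look up the transfer ring for a (slot, endpoint, stream) triple,
* validating the stream ID against the endpoint's stream configuration.
*/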
static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id)
{
struct xhci_virt_ep *ep;
ep = &xhci->devs[slot_id]->eps[ep_index];
/* Common case: no streams */
if (!(ep->ep_state & EP_HAS_STREAMS))
return ep->ring;
if (stream_id == 0) {
xhci_warn(xhci,
"WARN: Slot ID %u, ep index %u has streams, "
"but URB has no stream ID.\n",
slot_id, ep_index);
return NULL;
}
if (stream_id < ep->stream_info->num_streams)
return ep->stream_info->stream_rings[stream_id];
xhci_warn(xhci,
"WARN: Slot ID %u, ep index %u has "
"stream IDs 1 to %u allocated, "
"but stream ID %u is requested.\n",
slot_id, ep_index,
ep->stream_info->num_streams - 1,
stream_id);
return NULL;
}
/* Get the right ring for the given URB.
* If the endpoint supports streams, boundary check the URB's stream ID.
* If the endpoint doesn't support streams, return the singular endpoint ring.
*/
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
struct urb *urb)
{
return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}
/*
* Move the xHC's endpoint ring dequeue pointer past cur_td.
* Record the new state of the xHC's endpoint ring dequeue segment,
* dequeue pointer, and new consumer cycle state in state.
* Update our internal representation of the ring's dequeue pointer.
*
* We do this in three jumps:
* - First we update our new ring state to be the same as when the xHC stopped.
* - Then we traverse the ring to find the segment that contains
* the last TRB in the TD. We toggle the xHC's new cycle state when we pass
* any link TRBs with the toggle cycle bit set.
* - Finally we move the dequeue state one TRB further, toggling the cycle bit
* if we've moved it past a link TRB with the toggle cycle bit set.
*
* Some of the uses of xhci_generic_trb are grotty, but if they're done
* with correct __le32 accesses they should work fine. Only users of this are
* in here.
*/
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id, struct xhci_td *cur_td,
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN can't find new dequeue state "
"for invalid stream ID %u.\n",
stream_id);
return;
}
state->new_cycle_state = 0;
xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}
trb = &state->new_deq_ptr->generic;
if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
(trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
/*
* If there is only one segment in a ring, find_trb_seg()'s while loop
* will not run, and it will return before it has a chance to see if it
* needs to toggle the cycle bit. It can't tell if the stalled transfer
* ended just before the link TRB on a one-segment ring, or if the TD
* wrapped around the top of the ring, because it doesn't have the TD in
* question. Look for the one-segment case where stalled TRB's address
* is greater than the new dequeue pointer address.
*/
if (ep_ring->first_seg == ep_ring->first_seg->next &&
state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
state->new_cycle_state ^= 0x1;
xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
/* Don't update the ring cycle state for the producer (us). */
xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
state->new_deq_seg);
addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
(unsigned long long) addr);
}
/* flip_cycle means flip the cycle bit of all but the first and last TRB.
* (The last TRB actually points to the ring enqueue pointer, which is not part
* of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
struct xhci_td *cur_td, bool flip_cycle)
{
struct xhci_segment *cur_seg;
union xhci_trb *cur_trb;
for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
true;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
/* Unchain any chained Link TRBs, but
* leave the pointers intact.
*/
cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
/* Flip the cycle bit (link TRBs can't be the first
* or last TRB).
*/
if (flip_cycle)
cur_trb->generic.field[3] ^=
cpu_to_le32(TRB_CYCLE);
xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
xhci_dbg(xhci, "Address = %p (0x%llx dma); "
"in seg %p (0x%llx dma)\n",
cur_trb,
(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
cur_seg,
(unsigned long long)cur_seg->dma);
} else {
cur_trb->generic.field[0] = 0;
cur_trb->generic.field[1] = 0;
cur_trb->generic.field[2] = 0;
/* Preserve only the cycle bit of this TRB */
cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
/* Flip the cycle bit except on the first or last TRB */
if (flip_cycle && cur_trb != cur_td->first_trb &&
cur_trb != cur_td->last_trb)
cur_trb->generic.field[3] ^=
cpu_to_le32(TRB_CYCLE);
cur_trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP));
xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
(unsigned long long)
xhci_trb_virt_to_dma(cur_seg, cur_trb));
}
if (cur_trb == cur_td->last_trb)
break;
}
}
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state);
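/* Queue a Set TR Dequeue Pointer command for the state computed by
* xhci_find_new_dequeue_state(), and mark the endpoint so the doorbell
* is not rung again until that command completes.
*/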
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
struct xhci_dequeue_state *deq_state)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
deq_state->new_deq_seg,
(unsigned long long)deq_state->new_deq_seg->dma,
deq_state->new_deq_ptr,
(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
deq_state->new_cycle_state);
queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
deq_state->new_deq_seg,
deq_state->new_deq_ptr,
(u32) deq_state->new_cycle_state);
/* Stop the TD queueing code from ringing the doorbell until
* this command completes. The HC won't set the dequeue pointer
* if the ring is running, and ringing the doorbell starts the
* ring running.
*/
ep->ep_state |= SET_DEQ_PENDING;
}
static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
ep->ep_state &= ~EP_HALT_PENDING;
/* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
* timer is running on another CPU, we don't decrement stop_cmds_pending
* (since we didn't successfully stop the watchdog timer).
*/
if (del_timer(&ep->stop_cmd_timer))
ep->stop_cmds_pending--;
}
/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
struct xhci_td *cur_td, int status, char *adjective)
{
struct usb_hcd *hcd;
struct urb *urb;
struct urb_priv *urb_priv;
urb = cur_td->urb;
urb_priv = urb->hcpriv;
urb_priv->td_cnt++;
hcd = bus_to_hcd(urb->dev->bus);
/* Only giveback urb when this is the last td in urb */
if (urb_priv->td_cnt == urb_priv->length) {
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
}
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(hcd, urb, status);
xhci_urb_free_priv(xhci, urb_priv);
spin_lock(&xhci->lock);
}
}
/*
* When we get a command completion for a Stop Endpoint Command, we need to
* unlink any cancelled TDs from the ring. There are two ways to do that:
*
* 1. If the HW was in the middle of processing the TD that needs to be
* cancelled, then we must move the ring's dequeue pointer past the last TRB
* in the TD with a Set Dequeue Pointer Command.
* 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
* bit cleared) so that the HW will skip over them.
*/
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
union xhci_trb *trb, struct xhci_event_cmd *event)
{
unsigned int slot_id;
unsigned int ep_index;
struct xhci_virt_device *virt_dev;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct list_head *entry;
struct xhci_td *cur_td = NULL;
struct xhci_td *last_unlinked_td;
struct xhci_dequeue_state deq_state;
if (unlikely(TRB_TO_SUSPEND_PORT(
le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
slot_id = TRB_TO_SLOT_ID(
le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
virt_dev = xhci->devs[slot_id];
if (virt_dev)
handle_cmd_in_cmd_wait_list(xhci, virt_dev,
event);
else
xhci_warn(xhci, "Stop endpoint command "
"completion for disabled slot %u\n",
slot_id);
return;
}
memset(&deq_state, 0, sizeof(deq_state));
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = &xhci->devs[slot_id]->eps[ep_index];
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
ep->stopped_td = NULL;
ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
/* Fix up the ep ring first, so HW stops executing cancelled TDs.
* We have the xHCI lock, so nothing can modify this list until we drop
* it. We're also in the event handler, so we can't get re-interrupted
* if another Stop Endpoint command completes
*/
list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
(unsigned long long)xhci_trb_virt_to_dma(
cur_td->start_seg, cur_td->first_trb));
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
if (!ep_ring) {
/* This shouldn't happen unless a driver is mucking
* with the stream ID after submission. This will
* leave the TD on the hardware ring, and the hardware
* will try to execute it, and may access a buffer
* that has already been freed. In the best case, the
* hardware will execute it, and the event handler will
* ignore the completion event for that TD, since it was
* removed from the td_list for that endpoint. In
* short, don't muck with the stream ID after
* submission.
*/
xhci_warn(xhci, "WARN Cancelled URB %p "
"has invalid stream ID %u.\n",
cur_td->urb,
cur_td->urb->stream_id);
goto remove_finished_td;
}
/*
* If we stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
*/
if (cur_td == ep->stopped_td)
xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
cur_td->urb->stream_id,
cur_td, &deq_state);
else
td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
/*
* The event handler won't see a completion for this TD anymore,
* so remove it from the endpoint ring's TD list. Keep it in
* the cancelled TD list for URB completion later.
*/
list_del_init(&cur_td->td_list);
}
last_unlinked_td = cur_td;
xhci_stop_watchdog_timer_in_irq(xhci, ep);
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
xhci_queue_new_dequeue_state(xhci,
slot_id, ep_index,
ep->stopped_td->urb->stream_id,
&deq_state);
xhci_ring_cmd_db(xhci);
} else {
/* Otherwise ring the doorbell(s) to restart queued transfers */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
ep->stopped_td = NULL;
ep->stopped_trb = NULL;
/*
* Drop the lock and complete the URBs in the cancelled TD list.
* New TDs to be cancelled might be added to the end of the list before
* we can complete all the URBs for the TDs we already unlinked.
* So stop when we've completed the URB for the last TD we unlinked.
*/
do {
cur_td = list_entry(ep->cancelled_td_list.next,
struct xhci_td, cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
/* Clean up the cancelled URB */
/* Doesn't matter what we pass for status, since the core will
* just overwrite it (because the URB has been unlinked).
*/
xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
/* Stop processing the cancelled list if the watchdog timer is
* running.
*/
if (xhci->xhc_state & XHCI_STATE_DYING)
return;
} while (cur_td != last_unlinked_td);
/* Return to the event handler with xhci->lock re-acquired */
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
* let the event ring handler and the URB dequeueing/enqueueing functions know
* through xhci->xhc_state.
*
* The timer may also fire if the host takes a very long time to respond to the
* command, and the stop endpoint command completion handler cannot delete the
* timer before the timer function is called. Another endpoint cancellation may
* sneak in before the timer function can grab the lock, and that may queue
* another stop endpoint command and add the timer back. So we cannot use a
* simple flag to say whether there is a pending stop endpoint command for a
* particular endpoint.
*
* Instead we use a combination of that flag and a counter for the number of
* pending stop endpoint commands. If the timer is the tail end of the last
* stop endpoint command, and the endpoint's command is still pending, we assume
* the host is dying.
*/
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
struct xhci_virt_ep *temp_ep;
struct xhci_ring *ring;
struct xhci_td *cur_td;
int ret, i, j;
unsigned long flags;
ep = (struct xhci_virt_ep *) arg;
xhci = ep->xhci;
spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
"xHCI as DYING, exiting.\n");
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
"exiting.\n");
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
xhci_warn(xhci, "Assuming host is dying, halting host.\n");
/* Oops, HC is dead or dying or at least not responding to the stop
* endpoint command.
*/
xhci->xhc_state |= XHCI_STATE_DYING;
/* Disable interrupts from the host controller and start halting it */
xhci_quiesce(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
ret = xhci_halt(xhci);
spin_lock_irqsave(&xhci->lock, flags);
if (ret < 0) {
/* This is bad; the host is not responding to commands and it's
* not allowing itself to be halted. At least interrupts are
* disabled. If we call usb_hc_died(), it will attempt to
* disconnect all device drivers under this host. Those
* disconnect() methods will wait for all URBs to be unlinked,
* so we must complete them.
*/
xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
xhci_warn(xhci, "Completing active URBs anyway.\n");
/* We could turn all TDs on the rings to no-ops. This won't
* help if the host has cached part of the ring, and is slow if
* we want to preserve the cycle bit. Skip it and hope the host
* doesn't touch the memory.
*/
}
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
for (j = 0; j < 31; j++) {
temp_ep = &xhci->devs[i]->eps[j];
ring = temp_ep->ring;
if (!ring)
continue;
xhci_dbg(xhci, "Killing URBs for slot ID %u, "
"ep index %u\n", i, j);
while (!list_empty(&ring->td_list)) {
cur_td = list_first_entry(&ring->td_list,
struct xhci_td,
td_list);
list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
-ESHUTDOWN, "killed");
}
while (!list_empty(&temp_ep->cancelled_td_list)) {
cur_td = list_first_entry(
&temp_ep->cancelled_td_list,
struct xhci_td,
cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
-ESHUTDOWN, "killed");
}
}
}
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Calling usb_hc_died()\n");
usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
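/* Advance the ring's software dequeue pointer until it matches the
* dequeue pointer we handed to the hardware, updating the free-TRB
* count as we go; revert if we wrap around without finding it.
*/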
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
struct xhci_ring *ep_ring,
unsigned int ep_index)
{
union xhci_trb *dequeue_temp;
int num_trbs_free_temp;
bool revert = false;
num_trbs_free_temp = ep_ring->num_trbs_free;
dequeue_temp = ep_ring->dequeue;
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
ep_ring->dequeue++;
if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
ep_ring->dequeue)) {
if (ep_ring->dequeue ==
dev->eps[ep_index].queued_deq_ptr)
break;
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
if (ep_ring->dequeue == dequeue_temp) {
revert = true;
break;
}
}
if (revert) {
xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
ep_ring->num_trbs_free = num_trbs_free_temp;
}
}
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
* the TD queueing code can ring the doorbell again. We also need to ring the
* endpoint doorbell to restart the ring, but only if there aren't more
* cancellations pending.
*/
static void handle_set_deq_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event,
union xhci_trb *trb)
{
unsigned int slot_id;
unsigned int ep_index;
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN Set TR deq ptr command for "
"freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
return;
}
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
unsigned int ep_state;
unsigned int slot_state;
switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
case COMP_TRB_ERR:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
"of stream ID configuration\n");
break;
case COMP_CTX_STATE:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
"to incorrect slot or ep state.\n");
ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK;
slot_state = le32_to_cpu(slot_ctx->dev_state);
slot_state = GET_SLOT_STATE(slot_state);
xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
slot_state, ep_state);
break;
case COMP_EBADSLT:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
"slot %u was not enabled.\n", slot_id);
break;
default:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
"completion code of %u.\n",
GET_COMP_CODE(le32_to_cpu(event->status)));
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
* should never get to this point if the synchronization between
* queueing and endpoint state is correct. This might happen
* if the device gets disconnected after we've finished
* cancelling URBs, which might not be an error...
*/
} else {
xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
le64_to_cpu(ep_ctx->deq));
if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
dev->eps[ep_index].queued_deq_ptr) ==
(le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, dev,
ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq "
"Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
dev->eps[ep_index].queued_deq_seg,
dev->eps[ep_index].queued_deq_ptr);
}
}
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
dev->eps[ep_index].queued_deq_seg = NULL;
dev->eps[ep_index].queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event,
union xhci_trb *trb)
{
int slot_id;
unsigned int ep_index;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
GET_COMP_CODE(le32_to_cpu(event->status)));
/* HW with the reset endpoint quirk needs to have a configure endpoint
* command complete before the endpoint can be used. Queue that here
* because the HW can't handle two commands being queued in a row.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
xhci_dbg(xhci, "Queueing configure endpoint command\n");
xhci_queue_configure_endpoint(xhci,
xhci->devs[slot_id]->in_ctx->dma, slot_id,
false);
xhci_ring_cmd_db(xhci);
} else {
/* Clear our internal halted state and restart the ring(s) */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
}
/* Check to see if a command in the device's command queue matches this one.
* Signal the completion or free the command, and return 1. Return 0 if the
* completed command isn't at the head of the command list.
*/
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct xhci_event_cmd *event)
{
struct xhci_command *command;
if (list_empty(&virt_dev->cmd_list))
return 0;
command = list_entry(virt_dev->cmd_list.next,
struct xhci_command, cmd_list);
if (xhci->cmd_ring->dequeue != command->command_trb)
return 0;
command->status = GET_COMP_CODE(le32_to_cpu(event->status));
list_del(&command->cmd_list);
if (command->completion)
complete(command->completion);
else
xhci_free_command(xhci, command);
return 1;
}
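/* Handle a command completion event: sanity-check that the event's DMA
* address matches the command ring's dequeue pointer, then dispatch on
* the type of the completed command TRB.
*/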
static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_virt_device *virt_dev;
unsigned int ep_index;
struct xhci_ring *ep_ring;
unsigned int ep_state;
cmd_dma = le64_to_cpu(event->cmd_trb);
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
xhci->cmd_ring->dequeue);
/* Is the command ring deq ptr out of sync with the deq seg ptr? */
if (cmd_dequeue_dma == 0) {
xhci->error_bitmask |= 1 << 4;
return;
}
/* Does the DMA address match our internal dequeue pointer address? */
if (cmd_dma != (u64) cmd_dequeue_dma) {
xhci->error_bitmask |= 1 << 5;
return;
}
switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
& TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_ENABLE_SLOT):
if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
xhci->slot_id = slot_id;
else
xhci->slot_id = 0;
complete(&xhci->addr_dev);
break;
case TRB_TYPE(TRB_DISABLE_SLOT):
if (xhci->devs[slot_id]) {
if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
/* Delete default control endpoint resources */
xhci_free_device_endpoint_resources(xhci,
xhci->devs[slot_id], true);
xhci_free_virt_device(xhci, slot_id);
}
break;
case TRB_TYPE(TRB_CONFIG_EP):
virt_dev = xhci->devs[slot_id];
if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
break;
/*
* Configure endpoint commands can come from the USB core
* configuration or alt setting changes, or because the HW
* needed an extra configure endpoint command after a reset
* endpoint command or streams were being configured.
* If the command was for a halted endpoint, the xHCI driver
* is not waiting on the configure endpoint command.
*/
ctrl_ctx = xhci_get_input_control_ctx(xhci,
virt_dev->in_ctx);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
/* A usb_set_interface() call directly after clearing a halted
* condition may race on this quirky hardware. Not worth
* worrying about, since this is prototype hardware. Not sure
* if this will work for streams, but streams support was
* untested on this prototype.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
ep_index != (unsigned int) -1 &&
le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
le32_to_cpu(ctrl_ctx->drop_flags)) {
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
if (!(ep_state & EP_HALTED))
goto bandwidth_change;
xhci_dbg(xhci, "Completed config ep cmd - "
"last ep index = %d, state = %d\n",
ep_index, ep_state);
/* Clear internal halted state and restart ring(s) */
xhci->devs[slot_id]->eps[ep_index].ep_state &=
~EP_HALTED;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
break;
}
bandwidth_change:
xhci_dbg(xhci, "Completed config ep cmd\n");
xhci->devs[slot_id]->cmd_status =
GET_COMP_CODE(le32_to_cpu(event->status));
complete(&xhci->devs[slot_id]->cmd_completion);
break;
case TRB_TYPE(TRB_EVAL_CONTEXT):
virt_dev = xhci->devs[slot_id];
if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
break;
xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
complete(&xhci->devs[slot_id]->cmd_completion);
break;
case TRB_TYPE(TRB_ADDR_DEV):
xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
complete(&xhci->addr_dev);
break;
case TRB_TYPE(TRB_STOP_RING):
handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
break;
case TRB_TYPE(TRB_SET_DEQ):
handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
break;
case TRB_TYPE(TRB_CMD_NOOP):
break;
case TRB_TYPE(TRB_RESET_EP):
handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
break;
case TRB_TYPE(TRB_RESET_DEV):
xhci_dbg(xhci, "Completed reset device command.\n");
slot_id = TRB_TO_SLOT_ID(
le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
virt_dev = xhci->devs[slot_id];
if (virt_dev)
handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
else
xhci_warn(xhci, "Reset device command completion "
"for disabled slot %u\n", slot_id);
break;
case TRB_TYPE(TRB_NEC_GET_FW):
if (!(xhci->quirks & XHCI_NEC_HOST)) {
xhci->error_bitmask |= 1 << 6;
break;
}
xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
NEC_FW_MAJOR(le32_to_cpu(event->status)),
NEC_FW_MINOR(le32_to_cpu(event->status)));
break;
default:
/* Skip over unknown commands on the event ring */
xhci->error_bitmask |= 1 << 6;
break;
}
inc_deq(xhci, xhci->cmd_ring);
}
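/* Vendor-specific event TRB. On NEC hosts, NEC command completion
* events are routed back through the command completion handler.
*/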
static void handle_vendor_event(struct xhci_hcd *xhci,
union xhci_trb *event)
{
u32 trb_type;
trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
handle_cmd_completion(xhci, &event->event_cmd);
}
/* @port_id: the one-based port ID from the hardware (indexed from array of all
* port registers -- USB 3.0 and USB 2.0).
*
* Returns a zero-based port number, which is suitable for indexing into each of
* the split roothubs' port arrays and bus state arrays.
* Add one to it in order to call xhci_find_slot_id_by_port.
*/
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
struct xhci_hcd *xhci, u32 port_id)
{
unsigned int i;
unsigned int num_similar_speed_ports = 0;
/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
* and usb2_ports are 0-based indexes. Count the number of similar
* speed ports, up to 1 port before this port.
*/
for (i = 0; i < (port_id - 1); i++) {
u8 port_speed = xhci->port_array[i];
/*
* Skip ports that don't have known speeds, or have duplicate
* Extended Capabilities port speed entries.
*/
if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
continue;
/*
* USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
* 1.1 ports are under the USB 2.0 hub. If the port speed
* matches the device speed, it's a similar speed port.
*/
if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
num_similar_speed_ports++;
}
return num_similar_speed_ports;
}
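/* A Device Notification event here is treated as a device-initiated
* remote wakeup; pass it up to the hub driver for the device's parent
* port.
*/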
static void handle_device_notification(struct xhci_hcd *xhci,
union xhci_trb *event)
{
u32 slot_id;
struct usb_device *udev;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
if (!xhci->devs[slot_id]) {
xhci_warn(xhci, "Device Notification event for "
"unused slot %u\n", slot_id);
return;
}
xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
slot_id);
udev = xhci->devs[slot_id]->udev;
if (udev && udev->parent)
usb_wakeup_notification(udev->parent, udev->portnum);
}
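/* Handle a Port Status Change event: translate the hardware port ID
* into the matching roothub and port index, handle resume signalling,
* and let the USB core poll the roothub for the details.
*/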
static void handle_port_status(struct xhci_hcd *xhci,
union xhci_trb *event)
{
struct usb_hcd *hcd;
u32 port_id;
u32 temp, temp1;
int max_ports;
int slot_id;
unsigned int faked_port_index;
u8 major_revision;
struct xhci_bus_state *bus_state;
__le32 __iomem **port_array;
bool bogus_port_status = false;
/* Port status change events always have a successful completion code */
if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
xhci->error_bitmask |= 1 << 8;
}
port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Invalid port id %d\n", port_id);
bogus_port_status = true;
goto cleanup;
}
/* Figure out which usb_hcd this port is attached to:
* is it a USB 3.0 port or a USB 2.0/1.1 port?
*/
major_revision = xhci->port_array[port_id - 1];
if (major_revision == 0) {
xhci_warn(xhci, "Event for port %u not in "
"Extended Capabilities, ignoring.\n",
port_id);
bogus_port_status = true;
goto cleanup;
}
if (major_revision == DUPLICATE_ENTRY) {
xhci_warn(xhci, "Event for port %u duplicated in"
"Extended Capabilities, ignoring.\n",
port_id);
bogus_port_status = true;
goto cleanup;
}
/*
* Hardware port IDs reported by a Port Status Change Event include USB
* 3.0 and USB 2.0 ports. We want to check if the port has reported a
* resume event, but we first need to translate the hardware port ID
* into the index into the ports on the correct split roothub, and the
* correct bus_state structure.
*/
/* Find the right roothub. */
hcd = xhci_to_hcd(xhci);
if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
hcd = xhci->shared_hcd;
bus_state = &xhci->bus_state[hcd_index(hcd)];
if (hcd->speed == HCD_USB3)
port_array = xhci->usb3_ports;
else
port_array = xhci->usb2_ports;
/* Find the faked port hub number */
faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
port_id);
temp = xhci_readl(xhci, port_array[faked_port_index]);
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
}
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
temp1 = xhci_readl(xhci, &xhci->op_regs->command);
if (!(temp1 & CMD_RUN)) {
xhci_warn(xhci, "xHC is not running.\n");
goto cleanup;
}
if (DEV_SUPERSPEED(temp)) {
xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
/* Set a flag to say the port signaled remote wakeup,
* so we can tell the difference between the end of
* device and host initiated resume.
*/
bus_state->port_remote_wakeup |= 1 << faked_port_index;
xhci_test_and_clear_bit(xhci, port_array,
faked_port_index, PORT_PLC);
xhci_set_link_state(xhci, port_array, faked_port_index,
XDEV_U0);
/* Need to wait until the next link state change
* indicates the device is actually in U0.
*/
bogus_port_status = true;
goto cleanup;
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
bus_state->resume_done[faked_port_index] = jiffies +
msecs_to_jiffies(20);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[faked_port_index]);
/* Do the rest in GetPortStatus */
}
}
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
DEV_SUPERSPEED(temp)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
/* We've just brought the device into U0 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
* initiated remote wake, don't pass up the link state change,
* so the roothub behavior is consistent with external
* USB 3.0 hub behavior.
*/
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
faked_port_index + 1);
if (slot_id && xhci->devs[slot_id])
xhci_ring_device(xhci, slot_id);
if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
bus_state->port_remote_wakeup &=
~(1 << faked_port_index);
xhci_test_and_clear_bit(xhci, port_array,
faked_port_index, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
faked_port_index + 1);
bogus_port_status = true;
goto cleanup;
}
}
if (hcd->speed != HCD_USB3)
xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
PORT_PLC);
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
inc_deq(xhci, xhci->event_ring);
/* Don't make the USB core poll the roothub if we got a bad port status
* change event. Besides, at that point we can't tell which roothub
* (USB 2.0 or USB 3.0) to kick.
*/
if (bogus_port_status)
return;
spin_unlock(&xhci->lock);
/* Pass this up to the core */
usb_hcd_poll_rh_status(hcd);
spin_lock(&xhci->lock);
}
/*
* This TD is defined by the TRBs starting at start_trb in start_seg and ending
* at end_trb, which may be in another segment. If the suspect DMA address is a
* TRB in this TD, this function returns that TRB's segment. Otherwise it
* returns 0.
*/
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
union xhci_trb *start_trb,
union xhci_trb *end_trb,
dma_addr_t suspect_dma)
{
dma_addr_t start_dma;
dma_addr_t end_seg_dma;
dma_addr_t end_trb_dma;
struct xhci_segment *cur_seg;
start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
cur_seg = start_seg;
do {
if (start_dma == 0)
return NULL;
/* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
/* If the end TRB isn't in this segment, this is set to 0 */
end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
if (end_trb_dma > 0) {
/* The end TRB is in this segment, so suspect should be here */
if (start_dma <= end_trb_dma) {
if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
return cur_seg;
} else {
/* Case for one segment with
* a TD wrapped around to the top
*/
if ((suspect_dma >= start_dma &&
suspect_dma <= end_seg_dma) ||
(suspect_dma >= cur_seg->dma &&
suspect_dma <= end_trb_dma))
return cur_seg;
}
return NULL;
} else {
/* Might still be somewhere in this segment */
if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
return cur_seg;
}
cur_seg = cur_seg->next;
start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
} while (cur_seg != start_seg);
return NULL;
}
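/* Recover an endpoint that the hardware halted on an error the class
* driver will not clear itself: queue a Reset Endpoint command, move
* the ring past the offending TD, and ring the command doorbell.
*/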
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
struct xhci_td *td, union xhci_trb *event_trb)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
ep->stopped_trb = event_trb;
ep->stopped_stream = stream_id;
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
ep->stopped_td = NULL;
ep->stopped_trb = NULL;
ep->stopped_stream = 0;
xhci_ring_cmd_db(xhci);
}
/* Check if an error has halted the endpoint ring. The class driver will
* cleanup the halt for a non-default control endpoint if we indicate a stall.
* However, a babble and other errors also halt the endpoint ring, and the class
* driver won't clear the halt in that case, so we need to issue a Set Transfer
* Ring Dequeue Pointer command manually.
*/
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
unsigned int trb_comp_code)
{
/* TRB completion codes that may require a manual halt cleanup */
if (trb_comp_code == COMP_TX_ERR ||
trb_comp_code == COMP_BABBLE ||
trb_comp_code == COMP_SPLIT_ERR)
/* The 0.95 spec says a babbling control endpoint
* is not halted. The 0.96 spec says it is. Some HW
* claims to be 0.95 compliant, but it halts the control
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
cpu_to_le32(EP_STATE_HALTED))
return 1;
return 0;
}
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
if (trb_comp_code >= 224 && trb_comp_code <= 255) {
/* Vendor defined "informational" completion code,
* treat as not-an-error.
*/
xhci_dbg(xhci, "Vendor defined info completion code %u\n",
trb_comp_code);
xhci_dbg(xhci, "Treating code as success.\n");
return 1;
}
return 0;
}
/*
* Finish the td processing, remove the td from td list;
* Return 1 if the urb can be given back.
*/
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *event_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status, bool skip)
{
struct xhci_virt_device *xdev;
struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
struct urb *urb = NULL;
struct xhci_ep_ctx *ep_ctx;
int ret = 0;
struct urb_priv *urb_priv;
u32 trb_comp_code;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
if (skip)
goto td_cleanup;
if (trb_comp_code == COMP_STOP_INVAL ||
trb_comp_code == COMP_STOP) {
/* The Endpoint Stop Command completion will take care of any
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
ep->stopped_td = td;
ep->stopped_trb = event_trb;
return 0;
} else {
if (trb_comp_code == COMP_STALL) {
/* The transfer is completed from the driver's
* perspective, but we need to issue a set dequeue
* command for this stalled endpoint to move the dequeue
* pointer past the TD. We can't do that here because
* the halt condition must be cleared first. Let the
* USB class driver clear the stall later.
*/
ep->stopped_td = td;
ep->stopped_trb = event_trb;
ep->stopped_stream = ep_ring->stream_id;
} else if (xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code)) {
/* Other types of errors halt the endpoint, but the
* class driver doesn't call usb_reset_endpoint() unless
* the error is -EPIPE. Clear the halted status in the
* xHCI hardware manually.
*/
xhci_cleanup_halted_endpoint(xhci,
slot_id, ep_index, ep_ring->stream_id,
td, event_trb);
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
inc_deq(xhci, ep_ring);
inc_deq(xhci, ep_ring);
}
td_cleanup:
/* Clean up the endpoint's TD list */
urb = td->urb;
urb_priv = urb->hcpriv;
/* Do one last check of the actual transfer length.
* If the host controller said we transferred more data than
* the buffer length, urb->actual_length will be a very big
* number (since it's unsigned). Play it safe and say we didn't
* transfer anything.
*/
if (urb->actual_length > urb->transfer_buffer_length) {
xhci_warn(xhci, "URB transfer length is wrong, "
"xHC issue? req. len = %u, "
"act. len = %u\n",
urb->transfer_buffer_length,
urb->actual_length);
urb->actual_length = 0;
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
*status = 0;
}
list_del_init(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
urb_priv->td_cnt++;
/* Giveback the urb when all the tds are completed */
if (urb_priv->td_cnt == urb_priv->length) {
ret = 1;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
== 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
}
}
}
return ret;
}
/*
* Process control tds, update urb status and actual_length.
*/
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *event_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_virt_device *xdev;
struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
switch (trb_comp_code) {
case COMP_SUCCESS:
if (event_trb == ep_ring->dequeue) {
xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
"without IOC set??\n");
*status = -ESHUTDOWN;
} else if (event_trb != td->last_trb) {
xhci_warn(xhci, "WARN: Success on ctrl data TRB "
"without IOC set??\n");
*status = -ESHUTDOWN;
} else {
*status = 0;
}
break;
case COMP_SHORT_TX:
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
*status = 0;
break;
case COMP_STOP_INVAL:
case COMP_STOP:
return finish_td(xhci, td, event_trb, event, ep, status, false);
default:
if (!xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code))
break;
xhci_dbg(xhci, "TRB error code %u, "
"halted endpoint index = %u\n",
trb_comp_code, ep_index);
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
if (event_trb != ep_ring->dequeue &&
event_trb != td->last_trb)
td->urb->actual_length =
td->urb->transfer_buffer_length
- TRB_LEN(le32_to_cpu(event->transfer_len));
else
td->urb->actual_length = 0;
xhci_cleanup_halted_endpoint(xhci,
slot_id, ep_index, 0, td, event_trb);
return finish_td(xhci, td, event_trb, event, ep, status, true);
}
/*
* Did we transfer any data, despite the errors that might have
* happened? I.e. did we get past the setup stage?
*/
if (event_trb != ep_ring->dequeue) {
/* The event was for the status stage */
if (event_trb == td->last_trb) {
if (td->urb->actual_length != 0) {
/* Don't overwrite a previously set error code
*/
if ((*status == -EINPROGRESS || *status == 0) &&
(td->urb->transfer_flags
& URB_SHORT_NOT_OK))
/* Did we already see a short data
* stage? */
*status = -EREMOTEIO;
} else {
td->urb->actual_length =
td->urb->transfer_buffer_length;
}
} else {
/* Maybe the event was for the data stage? */
td->urb->actual_length =
td->urb->transfer_buffer_length -
TRB_LEN(le32_to_cpu(event->transfer_len));
xhci_dbg(xhci, "Waiting for status "
"stage event\n");
return 0;
}
}
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
/*
* Process isochronous tds, update urb packet status and actual_length.
*/
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *event_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
int idx;
int len = 0;
union xhci_trb *cur_trb;
struct xhci_segment *cur_seg;
struct usb_iso_packet_descriptor *frame;
u32 trb_comp_code;
bool skip_td = false;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
frame->status = 0;
break;
case COMP_SHORT_TX:
frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
-EREMOTEIO : 0;
break;
case COMP_BW_OVER:
frame->status = -ECOMM;
skip_td = true;
break;
case COMP_BUFF_OVER:
case COMP_BABBLE:
frame->status = -EOVERFLOW;
skip_td = true;
break;
case COMP_DEV_ERR:
case COMP_STALL:
frame->status = -EPROTO;
skip_td = true;
break;
case COMP_STOP:
case COMP_STOP_INVAL:
break;
default:
frame->status = -1;
break;
}
if (trb_comp_code == COMP_SUCCESS || skip_td) {
frame->actual_length = frame->length;
td->urb->actual_length += frame->length;
} else {
for (cur_trb = ep_ring->dequeue,
cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
!TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
}
len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
TRB_LEN(le32_to_cpu(event->transfer_len));
if (trb_comp_code != COMP_STOP_INVAL) {
frame->actual_length = len;
td->urb->actual_length += len;
}
}
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
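/*
* Skip one isoc TD that the xHC missed: mark its frame descriptor with
* -EXDEV and zero actual_length, advance the ring dequeue pointer past
* the TD, and let finish_td() do the rest of the cleanup with skip set.
*/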
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct usb_iso_packet_descriptor *frame;
int idx;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
/* The transfer is partly done. */
frame->status = -EXDEV;
/* calc actual length */
frame->actual_length = 0;
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
inc_deq(xhci, ep_ring);
inc_deq(xhci, ep_ring);
return finish_td(xhci, td, NULL, event, ep, status, true);
}
/*
* Process bulk and interrupt tds, update urb status and actual_length.
*/
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *event_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_ring *ep_ring;
union xhci_trb *cur_trb;
struct xhci_segment *cur_seg;
u32 trb_comp_code;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
switch (trb_comp_code) {
case COMP_SUCCESS:
/* Double check that the HW transferred everything. */
if (event_trb != td->last_trb) {
xhci_warn(xhci, "WARN Successful completion "
"on short TX\n");
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
*status = 0;
} else {
*status = 0;
}
break;
case COMP_SHORT_TX:
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
*status = 0;
break;
default:
/* Others already handled above */
break;
}
if (trb_comp_code == COMP_SHORT_TX)
xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
"%d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
td->urb->transfer_buffer_length,
TRB_LEN(le32_to_cpu(event->transfer_len)));
/* Fast path - was this the last TRB in the TD for this URB? */
if (event_trb == td->last_trb) {
if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
td->urb->actual_length =
td->urb->transfer_buffer_length -
TRB_LEN(le32_to_cpu(event->transfer_len));
if (td->urb->transfer_buffer_length <
td->urb->actual_length) {
xhci_warn(xhci, "HC gave bad length "
"of %d bytes left\n",
TRB_LEN(le32_to_cpu(event->transfer_len)));
td->urb->actual_length = 0;
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
*status = 0;
}
/* Don't overwrite a previously set error code */
if (*status == -EINPROGRESS) {
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
*status = 0;
}
} else {
td->urb->actual_length =
td->urb->transfer_buffer_length;
/* Ignore a short packet completion if the
* untransferred length was zero.
*/
if (*status == -EREMOTEIO)
*status = 0;
}
} else {
/* Slow path - walk the list, starting from the dequeue
* pointer, to get the actual length transferred.
*/
td->urb->actual_length = 0;
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
!TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
td->urb->actual_length +=
TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
}
/* If the ring didn't stop on a Link or No-op TRB, add
* in the actual bytes transferred from the Normal TRB
*/
if (trb_comp_code != COMP_STOP_INVAL)
td->urb->actual_length +=
TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
TRB_LEN(le32_to_cpu(event->transfer_len));
}
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
* At this point, the host controller is probably hosed and should be reset.
*/
static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_transfer_event *event)
{
struct xhci_virt_device *xdev;
struct xhci_virt_ep *ep;
struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
struct xhci_td *td = NULL;
dma_addr_t event_dma;
struct xhci_segment *event_seg;
union xhci_trb *event_trb;
struct urb *urb = NULL;
int status = -EINPROGRESS;
struct urb_priv *urb_priv;
struct xhci_ep_ctx *ep_ctx;
struct list_head *tmp;
u32 trb_comp_code;
int ret = 0;
int td_num = 0;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
xhci->event_ring->deq_seg,
xhci->event_ring->dequeue),
lower_32_bits(le64_to_cpu(event->buffer)),
upper_32_bits(le64_to_cpu(event->buffer)),
le32_to_cpu(event->transfer_len),
le32_to_cpu(event->flags));
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
/* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
if (!ep_ring ||
(le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
xhci->event_ring->deq_seg,
xhci->event_ring->dequeue),
lower_32_bits(le64_to_cpu(event->buffer)),
upper_32_bits(le64_to_cpu(event->buffer)),
le32_to_cpu(event->transfer_len),
le32_to_cpu(event->flags));
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
/* Count the TDs currently queued on the ring if ep->skip is set */
if (ep->skip) {
list_for_each(tmp, &ep_ring->td_list)
td_num++;
}
event_dma = le64_to_cpu(event->buffer);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
/* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
* transfer type
*/
case COMP_SUCCESS:
case COMP_SHORT_TX:
break;
case COMP_STOP:
xhci_dbg(xhci, "Stopped on Transfer TRB\n");
break;
case COMP_STOP_INVAL:
xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
break;
case COMP_STALL:
xhci_dbg(xhci, "Stalled endpoint\n");
ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
case COMP_TRB_ERR:
xhci_warn(xhci, "WARN: TRB error on endpoint\n");
status = -EILSEQ;
break;
case COMP_SPLIT_ERR:
case COMP_TX_ERR:
xhci_dbg(xhci, "Transfer error on endpoint\n");
status = -EPROTO;
break;
case COMP_BABBLE:
xhci_dbg(xhci, "Babble error on endpoint\n");
status = -EOVERFLOW;
break;
case COMP_DB_ERR:
xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
status = -ENOSR;
break;
case COMP_BW_OVER:
xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
break;
case COMP_BUFF_OVER:
xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
break;
case COMP_UNDERRUN:
/*
* When the Isoch ring is empty, the xHC will generate
* a Ring Overrun Event for IN Isoch endpoint or Ring
* Underrun Event for OUT Isoch endpoint.
*/
xhci_dbg(xhci, "underrun event on endpoint\n");
if (!list_empty(&ep_ring->td_list))
xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
"still with TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup;
case COMP_OVERRUN:
xhci_dbg(xhci, "overrun event on endpoint\n");
if (!list_empty(&ep_ring->td_list))
xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
"still with TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup;
case COMP_DEV_ERR:
xhci_warn(xhci, "WARN: detected an incompatible device\n");
status = -EPROTO;
break;
case COMP_MISSED_INT:
/*
* When a Missed Service Error is encountered, one or more isoc
* TDs may have been missed by the xHC.
* Set the skip flag of the ep_ring; complete the missed TDs as
* short transfers the next time the ep_ring is processed.
*/
ep->skip = true;
xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
goto cleanup;
default:
if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
break;
}
xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
"busted\n");
goto cleanup;
}
do {
/* This TRB should be in the TD at the head of this ring's
* TD list.
*/
if (list_empty(&ep_ring->td_list)) {
xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
"with no TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
(le32_to_cpu(event->flags) &
TRB_TYPE_BITMASK)>>10);
xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
if (ep->skip) {
ep->skip = false;
xhci_dbg(xhci, "td_list is empty while skip "
"flag set. Clear skip flag.\n");
}
ret = 0;
goto cleanup;
}
/* We've skipped all the TDs on the ep ring when ep->skip set */
if (ep->skip && td_num == 0) {
ep->skip = false;
xhci_dbg(xhci, "All tds on the ep_ring skipped. "
"Clear skip flag.\n");
ret = 0;
goto cleanup;
}
td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
if (ep->skip)
td_num--;
/* Is this a TRB in the currently executing TD? */
event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
td->last_trb, event_dma);
/*
* Skip the Force Stopped Event. The event_trb(event_dma) of the
* FSE is not in the current TD pointed to by ep_ring->dequeue,
* because the hardware dequeue pointer is still at the TRB
* preceding the current TD. That TRB may be a Link TRB or the
* last TRB of the previous TD. The command completion handler
* will take care of the rest.
*/
if (!event_seg && (trb_comp_code == COMP_STOP ||
trb_comp_code == COMP_STOP_INVAL)) {
ret = 0;
goto cleanup;
}
if (!event_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
/* Some host controllers give a spurious
* successful event after a short transfer.
* Ignore it.
*/
if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
ret = 0;
goto cleanup;
}
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
"part of current TD\n");
return -ESHUTDOWN;
}
ret = skip_isoc_td(xhci, td, event, ep, &status);
goto cleanup;
}
if (trb_comp_code == COMP_SHORT_TX)
ep_ring->last_td_was_short = true;
else
ep_ring->last_td_was_short = false;
if (ep->skip) {
xhci_dbg(xhci, "Found td. Clear skip flag.\n");
ep->skip = false;
}
event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
sizeof(*event_trb)];
/*
* No-op TRB should not trigger interrupts.
* If event_trb is a no-op TRB, it means the
* corresponding TD has been cancelled. Just ignore
* the TD.
*/
if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
xhci_dbg(xhci,
"event_trb is a no-op TRB. Skip it\n");
goto cleanup;
}
/* Now update the urb's actual_length and give back to
* the core
*/
if (usb_endpoint_xfer_control(&td->urb->ep->desc))
ret = process_ctrl_td(xhci, td, event_trb, event, ep,
&status);
else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
ret = process_isoc_td(xhci, td, event_trb, event, ep,
&status);
else
ret = process_bulk_intr_td(xhci, td, event_trb, event,
ep, &status);
cleanup:
/*
* Do not update the event ring dequeue pointer if ep->skip is set.
* We will roll back to continue processing the missed TDs.
*/
if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
inc_deq(xhci, xhci->event_ring);
}
if (ret) {
urb = td->urb;
urb_priv = urb->hcpriv;
/* Leave the TD around for the reset endpoint function
* to use(but only if it's not a control endpoint,
* since we already queued the Set TR dequeue pointer
* command for stalled control endpoints).
*/
if (usb_endpoint_xfer_control(&urb->ep->desc) ||
(trb_comp_code != COMP_STALL &&
trb_comp_code != COMP_BABBLE))
xhci_urb_free_priv(xhci, urb_priv);
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
if ((urb->actual_length != urb->transfer_buffer_length &&
(urb->transfer_flags &
URB_SHORT_NOT_OK)) ||
(status != 0 &&
!usb_endpoint_xfer_isoc(&urb->ep->desc)))
xhci_dbg(xhci, "Giveback URB %p, len = %d, "
"expected = %x, status = %d\n",
urb, urb->actual_length,
urb->transfer_buffer_length,
status);
spin_unlock(&xhci->lock);
/* EHCI, UHCI, and OHCI always unconditionally set the
* urb->status of an isochronous endpoint to 0.
*/
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
status = 0;
usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
}
/*
* If ep->skip is set, there are missed TDs on the endpoint ring
* that need to be taken care of.
* Process them as short transfers until we reach the TD pointed
* to by the event.
*/
} while (ep->skip && trb_comp_code != COMP_MISSED_INT);
return 0;
}
/*
* This function handles all OS-owned events on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
* Returns >0 for "possibly more events to process" (caller should call again),
* otherwise 0 if done. In future, <0 returns should indicate error code.
*/
static int xhci_handle_event(struct xhci_hcd *xhci)
{
union xhci_trb *event;
int update_ptrs = 1;
int ret;
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci->error_bitmask |= 1 << 1;
return 0;
}
event = xhci->event_ring->dequeue;
/* Does the HC or OS own the TRB? */
if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
xhci->event_ring->cycle_state) {
xhci->error_bitmask |= 1 << 2;
return 0;
}
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
* speculative reads of the event's flags/data below.
*/
rmb();
/* FIXME: Handle more event types. */
switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
case TRB_TYPE(TRB_COMPLETION):
handle_cmd_completion(xhci, &event->event_cmd);
break;
case TRB_TYPE(TRB_PORT_STATUS):
handle_port_status(xhci, event);
update_ptrs = 0;
break;
case TRB_TYPE(TRB_TRANSFER):
ret = handle_tx_event(xhci, &event->trans_event);
if (ret < 0)
xhci->error_bitmask |= 1 << 9;
else
update_ptrs = 0;
break;
case TRB_TYPE(TRB_DEV_NOTE):
handle_device_notification(xhci, event);
break;
default:
if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
TRB_TYPE(48))
handle_vendor_event(xhci, event);
else
xhci->error_bitmask |= 1 << 3;
}
/* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive.
*/
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "xHCI host dying, returning from "
"event handler.\n");
return 0;
}
if (update_ptrs)
/* Update SW event ring dequeue pointer */
inc_deq(xhci, xhci->event_ring);
/* Are there more items on the event ring? Caller will call us again to
* check.
*/
return 1;
}
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
*/
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 status;
union xhci_trb *trb;
u64 temp_64;
union xhci_trb *event_ring_deq;
dma_addr_t deq;
spin_lock(&xhci->lock);
trb = xhci->event_ring->dequeue;
/* Check if the xHC generated the interrupt, or the irq is shared */
status = xhci_readl(xhci, &xhci->op_regs->status);
if (status == 0xffffffff)
goto hw_died;
if (!(status & STS_EINT)) {
spin_unlock(&xhci->lock);
return IRQ_NONE;
}
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
hw_died:
spin_unlock(&xhci->lock);
return -ESHUTDOWN;
}
/*
* Clear the op reg interrupt status first,
* so we can receive interrupts from other MSI-X interrupters.
* Write 1 to clear the interrupt status.
*/
status |= STS_EINT;
xhci_writel(xhci, status, &xhci->op_regs->status);
/* FIXME when MSI-X is supported and there are multiple vectors */
/* Clear the MSI-X event interrupt status */
if (hcd->irq) {
u32 irq_pending;
/* Acknowledge the PCI interrupt */
irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
irq_pending |= IMAN_IP;
xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
}
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
"Shouldn't IRQs be disabled?\n");
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
xhci_write_64(xhci, temp_64 | ERST_EHB,
&xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
return IRQ_HANDLED;
}
event_ring_deq = xhci->event_ring->dequeue;
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
while (xhci_handle_event(xhci) > 0) {}
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != xhci->event_ring->dequeue) {
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
xhci->event_ring->dequeue);
if (deq == 0)
xhci_warn(xhci, "WARN something wrong with SW event "
"ring dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
temp_64 &= ERST_PTR_MASK;
temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
}
/* Clear the event handler busy flag (RW1C); event ring is empty. */
temp_64 |= ERST_EHB;
xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
return IRQ_HANDLED;
}
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
return xhci_irq(hcd);
}
/**** Endpoint Ring Operations ****/
/*
* Generic function for queueing a TRB on a ring.
* The caller must have checked to make sure there's room on the ring.
*
* @more_trbs_coming: Will you enqueue more TRBs before calling
* prepare_transfer()?
*/
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming,
u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xhci_generic_trb *trb;
trb = &ring->enqueue->generic;
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
trb->field[3] = cpu_to_le32(field4);
inc_enq(xhci, ring, more_trbs_coming);
}
/*
* Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
* FIXME allocate segments if the ring is full.
*/
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
unsigned int num_trbs_needed;
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
case EP_STATE_DISABLED:
/*
* USB core changed config/interfaces without notifying us,
* or hardware is reporting the wrong state.
*/
xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
return -ENOENT;
case EP_STATE_ERROR:
xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
/* FIXME event handling code for error needs to clear it */
/* XXX not sure if this should be -ENOENT or not */
return -EINVAL;
case EP_STATE_HALTED:
xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
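/* fall through: treat a halted endpoint like stopped/running and queue anyway */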
case EP_STATE_STOPPED:
case EP_STATE_RUNNING:
break;
default:
xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
/*
* FIXME issue Configure Endpoint command to try to get the HC
* back into a known state.
*/
return -EINVAL;
}
while (1) {
if (room_on_ring(xhci, ep_ring, num_trbs))
break;
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Command ring expansion is not supported\n");
return -ENOMEM;
}
xhci_dbg(xhci, "ERROR no room on ep ring, "
"try ring expansion\n");
num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
mem_flags)) {
xhci_err(xhci, "Ring expansion failed\n");
return -ENOMEM;
}
}
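/* If the enqueue pointer rests on a Link TRB, fix up its chain and cycle
* bits and advance the enqueue pointer into the next segment before
* queueing any transfer TRBs.
*/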
if (enqueue_is_link_trb(ep_ring)) {
struct xhci_ring *ring = ep_ring;
union xhci_trb *next;
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
/* If we're not dealing with 0.95 hardware or isoc rings
* on AMD 0.96 host, clear the chain bit.
*/
if (!xhci_link_trb_quirk(xhci) &&
!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
next->link.control &= cpu_to_le32(~TRB_CHAIN);
else
next->link.control |= cpu_to_le32(TRB_CHAIN);
wmb();
next->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
}
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
}
}
return 0;
}
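/*
* Look up the (stream) ring for this endpoint, make sure it has room for
* num_trbs via prepare_ring(), initialize the TD for this td_index, link
* the URB to the endpoint on the first TD, and record where the TD starts
* on the ring.
*/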
static int prepare_transfer(struct xhci_hcd *xhci,
struct xhci_virt_device *xdev,
unsigned int ep_index,
unsigned int stream_id,
unsigned int num_trbs,
struct urb *urb,
unsigned int td_index,
gfp_t mem_flags)
{
int ret;
struct urb_priv *urb_priv;
struct xhci_td *td;
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
if (!ep_ring) {
xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
stream_id);
return -EINVAL;
}
ret = prepare_ring(xhci, ep_ring,
le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
num_trbs, mem_flags);
if (ret)
return ret;
urb_priv = urb->hcpriv;
td = urb_priv->td[td_index];
INIT_LIST_HEAD(&td->td_list);
INIT_LIST_HEAD(&td->cancelled_td_list);
if (td_index == 0) {
ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
if (unlikely(ret))
return ret;
}
td->urb = urb;
/* Add this TD to the tail of the endpoint ring's TD list */
list_add_tail(&td->td_list, &ep_ring->td_list);
td->start_seg = ep_ring->enq_seg;
td->first_trb = ep_ring->enqueue;
urb_priv->td[td_index] = td;
return 0;
}
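/*
* Count how many TRBs are needed to describe the URB's mapped
* scatter-gather list, taking into account that a single TRB buffer may
* not cross a 64KB boundary.
*/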
static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
int num_sgs, num_trbs, running_total, temp, i;
struct scatterlist *sg;
sg = NULL;
num_sgs = urb->num_mapped_sgs;
temp = urb->transfer_buffer_length;
num_trbs = 0;
for_each_sg(urb->sg, sg, num_sgs, i) {
unsigned int len = sg_dma_len(sg);
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
len = min_t(int, len, temp);
temp -= len;
if (temp == 0)
break;
}
return num_trbs;
}
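/*
* Sanity check: the queueing loop should have consumed exactly the
* predicted number of TRBs and queued exactly transfer_buffer_length
* bytes.
*/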
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
running_total, running_total,
urb->transfer_buffer_length,
urb->transfer_buffer_length);
}
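/*
* Hand the TD to the hardware: write the saved cycle bit into the first
* TRB (which was held back while the rest of the TD was queued) and ring
* the endpoint doorbell.
*/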
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, unsigned int stream_id, int start_cycle,
struct xhci_generic_trb *start_trb)
{
/*
* Pass all the TRBs to the hardware at once and make sure this write
* isn't reordered.
*/
wmb();
if (start_cycle)
start_trb->field[3] |= cpu_to_le32(start_cycle);
else
start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
/*
* xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
* endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
* (comprised of sg list entries) can take several service intervals to
* transmit.
*/
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
xhci->devs[slot_id]->out_ctx, ep_index);
int xhci_interval;
int ep_interval;
xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval;
/* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
ep_interval *= 8;
/* FIXME change this to a warning and a suggestion to use the new API
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
if (printk_ratelimit())
dev_dbg(&urb->dev->dev, "Driver uses different interval"
" (%d microframe%s) than xHCI "
"(%d microframe%s)\n",
ep_interval,
ep_interval == 1 ? "" : "s",
xhci_interval,
xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
* The TD size is the number of bytes remaining in the TD (including this TRB),
* right shifted by 10.
* It must fit in bits 21:17, so it can't be bigger than 31.
*/
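/*
* For example, a remainder of 16384 bytes yields a TD size field of
* 16384 >> 10 = 16, encoded as 16 << 17; any remainder of 31744 bytes
* (31 KB) or more is clamped to 31 << 17.
*/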
static u32 xhci_td_remainder(unsigned int remainder)
{
u32 max = (1 << (21 - 17 + 1)) - 1;
if ((remainder >> 10) >= max)
return max << 17;
else
return (remainder >> 10) << 17;
}
/*
* For xHCI 1.0 host controllers, TD size is the number of packets remaining in
* the TD (*not* including this TRB).
*
* Total TD packet count = total_packet_count =
* roundup(TD size in bytes / wMaxPacketSize)
*
* Packets transferred up to and including this TRB = packets_transferred =
* rounddown(total bytes transferred including this TRB / wMaxPacketSize)
*
* TD size = total_packet_count - packets_transferred
*
* It must fit in bits 21:17, so it can't be bigger than 31.
*/
static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
unsigned int total_packet_count, struct urb *urb)
{
int packets_transferred;
/* One TRB with a zero-length data packet. */
if (running_total == 0 && trb_buff_len == 0)
return 0;
/* All the TRB queueing functions don't count the current TRB in
* running_total.
*/
packets_transferred = (running_total + trb_buff_len) /
usb_endpoint_maxp(&urb->ep->desc);
return xhci_td_remainder(total_packet_count - packets_transferred);
}
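/*
* Queue Normal TRBs for a bulk scatter-gather URB. Each sg entry is split
* on 64KB TRB buffer boundaries, all TRBs of the TD are chained together,
* and the first TRB's cycle bit is handed to the hardware last by
* giveback_first_trb().
*/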
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
unsigned int num_trbs;
struct urb_priv *urb_priv;
struct xhci_td *td;
struct scatterlist *sg;
int num_sgs;
int trb_buff_len, this_sg_len, running_total;
unsigned int total_packet_count;
bool first_trb;
u64 addr;
bool more_trbs_coming;
struct xhci_generic_trb *start_trb;
int start_cycle;
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
if (!ep_ring)
return -EINVAL;
num_trbs = count_sg_trbs_needed(xhci, urb);
num_sgs = urb->num_mapped_sgs;
total_packet_count = roundup(urb->transfer_buffer_length,
usb_endpoint_maxp(&urb->ep->desc));
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
urb_priv = urb->hcpriv;
td = urb_priv->td[0];
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
*/
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
running_total = 0;
/*
* How much data is in the first TRB?
*
* There are three forces at work for TRB buffer pointers and lengths:
* 1. We don't want to walk off the end of this sg-list entry buffer.
* 2. The transfer length that the driver requested may be smaller than
* the amount of memory allocated for this scatter-gather list.
* 3. TRBs buffers can't cross 64KB boundaries.
*/
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
/* Queue the first TRB, even if it's zero-length */
do {
u32 field = 0;
u32 length_field = 0;
u32 remainder = 0;
/* Don't change the cycle bit of the first TRB until later */
if (first_trb) {
first_trb = false;
if (start_cycle == 0)
field |= 0x1;
} else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
if (num_trbs > 1) {
field |= TRB_CHAIN;
} else {
/* FIXME - add check for ZERO_PACKET flag before this */
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
}
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
if (TRB_MAX_BUFF_SIZE -
(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
}
/* Set the TRB length, TD size, and interrupter fields. */
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
urb->transfer_buffer_length -
running_total);
} else {
remainder = xhci_v1_0_td_remainder(running_total,
trb_buff_len, total_packet_count, urb);
}
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
if (num_trbs > 1)
more_trbs_coming = true;
else
more_trbs_coming = false;
queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
field | TRB_TYPE(TRB_NORMAL));
--num_trbs;
running_total += trb_buff_len;
/* Calculate length for next transfer --
* Are we done queueing all the TRBs for this sg entry?
*/
this_sg_len -= trb_buff_len;
if (this_sg_len == 0) {
--num_sgs;
if (num_sgs == 0)
break;
sg = sg_next(sg);
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
} else {
addr += trb_buff_len;
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
(addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
urb->transfer_buffer_length - running_total;
} while (running_total < urb->transfer_buffer_length);
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
}
/* Queue Normal TRBs for a bulk transfer from a contiguous buffer;
* this is very similar to what ehci-q.c qtd_fill() does.
*/
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
int num_trbs;
struct xhci_generic_trb *start_trb;
bool first_trb;
bool more_trbs_coming;
int start_cycle;
u32 field, length_field;
int running_total, trb_buff_len, ret;
unsigned int total_packet_count;
u64 addr;
if (urb->num_sgs)
return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
if (!ep_ring)
return -EINVAL;
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
*/
if (running_total != 0 || urb->transfer_buffer_length == 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
while (running_total < urb->transfer_buffer_length) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
urb_priv = urb->hcpriv;
td = urb_priv->td[0];
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
*/
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
running_total = 0;
total_packet_count = roundup(urb->transfer_buffer_length,
usb_endpoint_maxp(&urb->ep->desc));
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
/* Queue the first TRB, even if it's zero-length */
do {
u32 remainder = 0;
field = 0;
/* Don't change the cycle bit of the first TRB until later */
if (first_trb) {
first_trb = false;
if (start_cycle == 0)
field |= 0x1;
} else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
if (num_trbs > 1) {
field |= TRB_CHAIN;
} else {
/* FIXME - add check for ZERO_PACKET flag before this */
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
}
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
urb->transfer_buffer_length -
running_total);
} else {
remainder = xhci_v1_0_td_remainder(running_total,
trb_buff_len, total_packet_count, urb);
}
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
if (num_trbs > 1)
more_trbs_coming = true;
else
more_trbs_coming = false;
queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
field | TRB_TYPE(TRB_NORMAL));
--num_trbs;
running_total += trb_buff_len;
/* Calculate length for next transfer */
addr += trb_buff_len;
trb_buff_len = urb->transfer_buffer_length - running_total;
if (trb_buff_len > TRB_MAX_BUFF_SIZE)
trb_buff_len = TRB_MAX_BUFF_SIZE;
} while (running_total < urb->transfer_buffer_length);
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
}
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
int num_trbs;
int ret;
struct usb_ctrlrequest *setup;
struct xhci_generic_trb *start_trb;
int start_cycle;
u32 field, length_field;
struct urb_priv *urb_priv;
struct xhci_td *td;
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
if (!ep_ring)
return -EINVAL;
/*
* Need to copy setup packet into setup TRB, so we can't use the setup
* DMA address.
*/
if (!urb->setup_packet)
return -EINVAL;
/* 1 TRB for setup, 1 for status */
num_trbs = 2;
/*
* Don't need to check if we need additional event data and normal TRBs,
* since data in control transfers will never get bigger than 16MB
* XXX: can we get a buffer that crosses 64KB boundaries?
*/
if (urb->transfer_buffer_length > 0)
num_trbs++;
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
urb_priv = urb->hcpriv;
td = urb_priv->td[0];
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
*/
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
/* Queue setup TRB - see section 6.4.1.2.1 */
/* FIXME better way to translate setup_packet into two u32 fields? */
setup = (struct usb_ctrlrequest *) urb->setup_packet;
field = 0;
field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
if (start_cycle == 0)
field |= 0x1;
/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
if (xhci->hci_version == 0x100) {
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
else
field |= TRB_TX_TYPE(TRB_DATA_OUT);
}
}
queue_trb(xhci, ep_ring, true,
setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
TRB_LEN(8) | TRB_INTR_TARGET(0),
/* Immediate data in pointer */
field);
/* If there's data, queue data TRBs */
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field = TRB_ISP | TRB_TYPE(TRB_DATA);
else
field = TRB_TYPE(TRB_DATA);
length_field = TRB_LEN(urb->transfer_buffer_length) |
xhci_td_remainder(urb->transfer_buffer_length) |
TRB_INTR_TARGET(0);
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
queue_trb(xhci, ep_ring, true,
lower_32_bits(urb->transfer_dma),
upper_32_bits(urb->transfer_dma),
length_field,
field | ep_ring->cycle_state);
}
/* Save the DMA address of the last TRB in the TD */
td->last_trb = ep_ring->enqueue;
/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
/* If the device sent data, the status stage is an OUT transfer */
if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
field = 0;
else
field = TRB_DIR_IN;
queue_trb(xhci, ep_ring, false,
0,
0,
TRB_INTR_TARGET(0),
/* Event on completion */
field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
giveback_first_trb(xhci, slot_id, ep_index, 0,
start_cycle, start_trb);
return 0;
}
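/*
* Count how many TRBs one isochronous frame descriptor needs, given its
* DMA offset and the 64KB-per-TRB buffer limit. A zero-length packet
* still needs one TRB.
*/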
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
struct urb *urb, int i)
{
int num_trbs = 0;
u64 addr, td_len;
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
TRB_MAX_BUFF_SIZE);
if (num_trbs == 0)
num_trbs++;
return num_trbs;
}
/*
* The transfer burst count field of the isochronous TRB defines the number of
* bursts that are required to move all packets in this TD. Only SuperSpeed
* devices can burst up to bMaxBurst number of packets per service interval.
* This field is zero based, meaning a value of zero in the field means one
* burst. Basically, for everything but SuperSpeed devices, this field will be
* zero. Only xHCI 1.0 host controllers support this field.
*/
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
struct usb_device *udev,
struct urb *urb, unsigned int total_packet_count)
{
unsigned int max_burst;
if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
return roundup(total_packet_count, max_burst + 1) - 1;
}
/*
* Returns the number of packets in the last "burst" of packets. This field is
* valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
* the last burst packet count is equal to the total number of packets in the
* TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
* must contain (bMaxBurst + 1) number of packets, but the last burst can
* contain 1 to (bMaxBurst + 1) packets.
*/
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
struct usb_device *udev,
struct urb *urb, unsigned int total_packet_count)
{
unsigned int max_burst;
unsigned int residue;
if (xhci->hci_version < 0x100)
return 0;
switch (udev->speed) {
case USB_SPEED_SUPER:
/* bMaxBurst is zero based: 0 means 1 packet per burst */
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
residue = total_packet_count % (max_burst + 1);
/* If residue is zero, the last burst contains (max_burst + 1)
* number of packets, but the TLBPC field is zero-based.
*/
if (residue == 0)
return max_burst;
return residue - 1;
default:
if (total_packet_count == 0)
return 0;
return total_packet_count - 1;
}
}
/* Queue the TRBs for an isochronous transfer: one TD per iso_frame_desc entry */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
int num_tds, trbs_per_td;
struct xhci_generic_trb *start_trb;
bool first_trb;
int start_cycle;
u32 field, length_field;
int running_total, trb_buff_len, td_len, td_remain_len, ret;
u64 start_addr, addr;
int i, j;
bool more_trbs_coming;
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
num_tds = urb->number_of_packets;
if (num_tds < 1) {
xhci_dbg(xhci, "Isoc URB with zero packets?\n");
return -EINVAL;
}
start_addr = (u64) urb->transfer_dma;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
urb_priv = urb->hcpriv;
/* Queue the first TRB, even if it's zero-length */
for (i = 0; i < num_tds; i++) {
unsigned int total_packet_count;
unsigned int burst_count;
unsigned int residue;
first_trb = true;
running_total = 0;
addr = start_addr + urb->iso_frame_desc[i].offset;
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
total_packet_count = roundup(td_len,
usb_endpoint_maxp(&urb->ep->desc));
/* A zero-length transfer still involves at least one packet. */
if (total_packet_count == 0)
total_packet_count++;
burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
total_packet_count);
residue = xhci_get_last_burst_packet_count(xhci,
urb->dev, urb, total_packet_count);
trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
urb->stream_id, trbs_per_td, urb, i, mem_flags);
if (ret < 0) {
if (i == 0)
return ret;
goto cleanup;
}
td = urb_priv->td[i];
for (j = 0; j < trbs_per_td; j++) {
u32 remainder = 0;
field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
if (first_trb) {
/* Queue the isoc TRB */
field |= TRB_TYPE(TRB_ISOC);
/* Assume URB_ISO_ASAP is set */
field |= TRB_SIA;
if (i == 0) {
if (start_cycle == 0)
field |= 0x1;
} else
field |= ep_ring->cycle_state;
first_trb = false;
} else {
/* Queue other normal TRBs */
field |= TRB_TYPE(TRB_NORMAL);
field |= ep_ring->cycle_state;
}
/* Only set interrupt on short packet for IN EPs */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
/* Chain all the TRBs together; clear the chain bit in
* the last TRB to indicate it's the last TRB in the
* chain.
*/
if (j < trbs_per_td - 1) {
field |= TRB_CHAIN;
more_trbs_coming = true;
} else {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
if (xhci->hci_version == 0x100) {
/* Set BEI bit except for the last td */
if (i < num_tds - 1)
field |= TRB_BEI;
}
more_trbs_coming = false;
}
/* Calculate TRB length */
trb_buff_len = TRB_MAX_BUFF_SIZE -
(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
if (trb_buff_len > td_remain_len)
trb_buff_len = td_remain_len;
/* Set the TRB length, TD size, & interrupter fields. */
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
td_len - running_total);
} else {
remainder = xhci_v1_0_td_remainder(
running_total, trb_buff_len,
total_packet_count, urb);
}
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
field);
running_total += trb_buff_len;
addr += trb_buff_len;
td_remain_len -= trb_buff_len;
}
/* Check TD length */
if (running_total != td_len) {
xhci_err(xhci, "ISOC TD length mismatch\n");
ret = -EINVAL;
goto cleanup;
}
}
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_disable();
}
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
cleanup:
/* Clean up a partially enqueued isoc transfer. */
for (i--; i >= 0; i--)
list_del_init(&urb_priv->td[i]->td_list);
/* Use the first TD as a temporary variable to turn the TDs we've queued
* into No-ops with a software-owned cycle bit. That way the hardware
* won't accidentally start executing bogus TDs when we partially
* overwrite them. td->first_trb and td->start_seg are already set.
*/
urb_priv->td[0]->last_trb = ep_ring->enqueue;
/* Every TRB except the first & last will have its cycle bit flipped. */
td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
/* Reset the ring enqueue back to the first TRB and its cycle bit. */
ep_ring->enqueue = urb_priv->td[0]->first_trb;
ep_ring->enq_seg = urb_priv->td[0]->start_seg;
ep_ring->cycle_state = start_cycle;
ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
return ret;
}
/*
* Check the transfer ring to guarantee there is enough room for the URB,
* and update the ISO URB's start_frame and interval.
* The interval is updated the same way xhci_queue_intr_tx does it; for now,
* just use the xHC frame_index to update urb->start_frame.
* Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
*/
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_virt_device *xdev;
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx;
int start_frame;
int xhci_interval;
int ep_interval;
int num_tds, num_trbs, i;
int ret;
xdev = xhci->devs[slot_id];
ep_ring = xdev->eps[ep_index].ring;
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
num_trbs = 0;
num_tds = urb->number_of_packets;
for (i = 0; i < num_tds; i++)
num_trbs += count_isoc_trbs_needed(xhci, urb, i);
/* Check the ring to guarantee there is enough room for the whole URB.
* Do not insert any TD of the URB into the ring if the check fails.
*/
ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
num_trbs, mem_flags);
if (ret)
return ret;
start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
start_frame &= 0x3fff;
urb->start_frame = start_frame;
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->start_frame >>= 3;
xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval;
/* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
ep_interval *= 8;
/* FIXME change this to a warning and a suggestion to use the new API
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
if (printk_ratelimit())
dev_dbg(&urb->dev->dev, "Driver uses different interval"
" (%d microframe%s) than xHCI "
"(%d microframe%s)\n",
ep_interval,
ep_interval == 1 ? "" : "s",
xhci_interval,
xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/**** Command Ring Operations ****/
/* Generic function for queueing a command TRB on the command ring.
* Check to make sure there's room on the command ring for one command TRB.
* Also check that there's room reserved for commands that must not fail.
* If this is a command that must not fail, meaning command_must_succeed = TRUE,
* then only check for the number of reserved spots.
* Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
* because the command event handler may want to resubmit a failed command.
*/
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
u32 field3, u32 field4, bool command_must_succeed)
{
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
if (!command_must_succeed)
reserved_trbs++;
ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
reserved_trbs, GFP_ATOMIC);
if (ret < 0) {
xhci_err(xhci, "ERR: No room for command on command ring\n");
if (command_must_succeed)
xhci_err(xhci, "ERR: Reserved TRB counting for "
"unfailable commands failed.\n");
return ret;
}
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
field4 | xhci->cmd_ring->cycle_state);
return 0;
}
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
return queue_command(xhci, 0, 0, 0,
TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}
/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id)
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
false);
}
int xhci_queue_vendor_command(struct xhci_hcd *xhci,
u32 field1, u32 field2, u32 field3, u32 field4)
{
return queue_command(xhci, field1, field2, field3, field4, false);
}
/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
return queue_command(xhci, 0, 0, 0,
TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
false);
}
/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
}
/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id)
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
false);
}
/*
* Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
* activity on an endpoint that is about to be suspended.
*/
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, int suspend)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_STOP_RING);
u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
return queue_command(xhci, 0, 0, 0,
trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
/* Set Transfer Ring Dequeue Pointer command.
* This should not be used for endpoints that have streams enabled.
*/
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state)
{
dma_addr_t addr;
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
u32 type = TRB_TYPE(TRB_SET_DEQ);
struct xhci_virt_ep *ep;
addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
if (addr == 0) {
xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
deq_seg, deq_ptr);
return 0;
}
ep = &xhci->devs[slot_id]->eps[ep_index];
if ((ep->ep_state & SET_DEQ_PENDING)) {
xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
return 0;
}
ep->queued_deq_seg = deq_seg;
ep->queued_deq_ptr = deq_ptr;
return queue_command(xhci, lower_32_bits(addr) | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
false);
}
/* end of drivers/usb/host/xhci-ring.c (ashikrobi/Crabbykernel, GPL-2.0) */
/*
* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/clk.h>
#include <mach/hardware.h>
#include <mach/iommu_domains.h>
#include <mach/iommu.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <linux/file.h>
#include <linux/android_pmem.h>
#include <linux/major.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/msm_kgsl.h>
#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"
#define VERSION_KEY_MASK 0xFFFFFF00
struct mdp4_overlay_ctrl {
struct mdp4_overlay_pipe plist[OVERLAY_PIPE_MAX];
struct mdp4_overlay_pipe *stage[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
struct mdp4_overlay_pipe *baselayer[MDP4_MIXER_MAX];
struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
uint32 mixer_cfg[MDP4_MIXER_MAX];
uint32 flush[MDP4_MIXER_MAX];
struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
struct iommu_free_list iommu_free_prev[MDP4_MIXER_MAX];
uint32 dmap_cfg[5];
uint32 cs_controller;
uint32 panel_3d;
uint32 panel_mode;
uint32 mixer0_played;
uint32 mixer1_played;
uint32 mixer2_played;
} mdp4_overlay_db = {
.cs_controller = CS_CONTROLLER_0,
.plist = {
{
.pipe_type = OVERLAY_TYPE_RGB,
.pipe_num = OVERLAY_PIPE_RGB1,
.pipe_ndx = 1,
},
{
.pipe_type = OVERLAY_TYPE_RGB,
.pipe_num = OVERLAY_PIPE_RGB2,
.pipe_ndx = 2,
},
{
.pipe_type = OVERLAY_TYPE_VIDEO,
.pipe_num = OVERLAY_PIPE_VG1,
.pipe_ndx = 3,
},
{
.pipe_type = OVERLAY_TYPE_VIDEO,
.pipe_num = OVERLAY_PIPE_VG2,
.pipe_ndx = 4,
},
{
.pipe_type = OVERLAY_TYPE_BF,
.pipe_num = OVERLAY_PIPE_RGB3,
.pipe_ndx = 5,
.mixer_num = MDP4_MIXER0,
},
{
.pipe_type = OVERLAY_TYPE_BF,
.pipe_num = OVERLAY_PIPE_VG3,
.pipe_ndx = 6,
.mixer_num = MDP4_MIXER1,
},
{
.pipe_type = OVERLAY_TYPE_BF,
.pipe_num = OVERLAY_PIPE_VG4,
.pipe_ndx = 7,
.mixer_num = MDP4_MIXER2,
},
},
};
static DEFINE_MUTEX(iommu_mutex);
static DEFINE_MUTEX(perf_mutex);
static struct mdp4_overlay_ctrl *ctrl = &mdp4_overlay_db;
struct mdp4_overlay_perf {
u32 mdp_clk_rate;
u32 use_ov_blt[MDP4_MIXER_MAX];
u64 mdp_ov_ab_bw[MDP4_MIXER_MAX];
u64 mdp_ov_ib_bw[MDP4_MIXER_MAX];
u32 mdp_ab_bw;
u32 mdp_ib_bw;
};
static struct mdp4_overlay_perf perf_request;
static struct mdp4_overlay_perf perf_current;
void mdp4_overlay_free_base_pipe(struct msm_fb_data_type *mfd)
{
if (!hdmi_prim_display && mfd->index == 0) {
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_free_base_pipe(mfd);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_free_base_pipe(mfd);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_free_base_pipe(mfd);
} else if (hdmi_prim_display || mfd->index == 1) {
mdp4_dtv_free_base_pipe(mfd);
}
}
static struct ion_client *display_iclient;
static int mdp4_map_sec_resource(struct msm_fb_data_type *mfd)
{
int ret = 0;
if (!mfd) {
pr_err("%s: mfd is invalid\n", __func__);
return -ENODEV;
}
pr_debug("%s %d mfd->index=%d,mapped=%d\n",
__func__, __LINE__,
mfd->index, mfd->sec_mapped);
if (mfd->sec_mapped)
return 0;
ret = mdp_enable_iommu_clocks();
if (ret) {
pr_err("IOMMU clock enabled failed while open");
return ret;
}
ret = msm_ion_secure_heap(ION_HEAP(ION_CP_MM_HEAP_ID));
if (ret)
pr_err("ION heap secure failed heap id %d ret %d\n",
ION_CP_MM_HEAP_ID, ret);
else
mfd->sec_mapped = 1;
mdp_disable_iommu_clocks();
return ret;
}
int mdp4_unmap_sec_resource(struct msm_fb_data_type *mfd)
{
int ret = 0;
int i, sec_cnt = 0;
struct mdp4_overlay_pipe *pipe;
if (!mfd) {
pr_err("%s: mfd is invalid\n", __func__);
return -ENODEV;
}
if (mfd->sec_mapped == 0)
return 0;
for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
pipe = &ctrl->plist[i];
if ((pipe->mixer_num == mfd->index) &&
pipe->flags & MDP_SECURE_OVERLAY_SESSION)
sec_cnt++;
}
if (sec_cnt)
return 0;
pr_debug("%s %d mfd->index=%d,mapped=%d\n",
__func__, __LINE__,
mfd->index, mfd->sec_mapped);
ret = mdp_enable_iommu_clocks();
if (ret) {
pr_err("IOMMU clock enabled failed while close\n");
return ret;
}
msm_ion_unsecure_heap(ION_HEAP(ION_CP_MM_HEAP_ID));
mfd->sec_mapped = 0;
mdp_disable_iommu_clocks();
return ret;
}
/*
* mdp4_overlay_iommu_unmap_freelist()
* mdp4_overlay_iommu_2freelist()
* mdp4_overlay_iommu_pipe_free()
* the three functions above need to be called from the same thread and
* in order so that no mutex is needed.
*/
void mdp4_overlay_iommu_unmap_freelist(int mixer)
{
int i;
struct ion_handle *ihdl;
struct iommu_free_list *flist, *pflist;
if (mixer >= MDP4_MIXER_MAX)
return;
mutex_lock(&iommu_mutex);
pflist = &ctrl->iommu_free_prev[mixer];
flist = &ctrl->iommu_free[mixer];
pr_debug("%s: mixer=%d fndx=%d %d\n", __func__,
mixer, pflist->fndx, flist->fndx);
if (pflist->fndx == 0) {
goto flist_to_pflist;
}
for (i = 0; i < IOMMU_FREE_LIST_MAX; i++) {
ihdl = pflist->ihdl[i];
if (ihdl == NULL)
continue;
pr_debug("%s: mixer=%d i=%d ihdl=0x%p\n", __func__,
mixer, i, ihdl);
ion_unmap_iommu(display_iclient, ihdl, DISPLAY_READ_DOMAIN,
GEN_POOL);
mdp4_stat.iommu_unmap++;
pr_debug("%s: map=%d unmap=%d drop=%d\n", __func__,
(int)mdp4_stat.iommu_map, (int)mdp4_stat.iommu_unmap,
(int)mdp4_stat.iommu_drop);
ion_free(display_iclient, ihdl);
}
flist_to_pflist:
/* move flist to pflist*/
memcpy(pflist, flist, sizeof(*pflist));
memset(flist, 0, sizeof(*flist));
mutex_unlock(&iommu_mutex);
}
void mdp4_overlay_iommu_2freelist(int mixer, struct ion_handle *ihdl)
{
struct iommu_free_list *flist;
flist = &ctrl->iommu_free[mixer];
if (flist->fndx >= IOMMU_FREE_LIST_MAX) {
pr_err("%s: Error, mixer=%d iommu fndx=%d\n",
__func__, mixer, flist->fndx);
mdp4_stat.iommu_drop++;
return;
}
pr_debug("%s: add mixer=%d fndx=%d ihdl=0x%p\n", __func__,
mixer, flist->fndx, ihdl);
flist->ihdl[flist->fndx++] = ihdl;
}
void mdp4_overlay_iommu_pipe_free(int ndx, int all)
{
struct mdp4_overlay_pipe *pipe;
struct mdp4_iommu_pipe_info *iom;
int plane, mixer;
pipe = mdp4_overlay_ndx2pipe(ndx);
if (pipe == NULL)
return;
if (pipe->flags & MDP_MEMORY_ID_TYPE_FB) {
pipe->flags &= ~MDP_MEMORY_ID_TYPE_FB;
if (pipe->put0_need) {
fput_light(pipe->srcp0_file, pipe->put0_need);
pipe->put0_need = 0;
}
if (pipe->put1_need) {
fput_light(pipe->srcp1_file, pipe->put1_need);
pipe->put1_need = 0;
}
if (pipe->put2_need) {
fput_light(pipe->srcp2_file, pipe->put2_need);
pipe->put2_need = 0;
}
pr_debug("%s: ndx=%d flags=%x put=%d\n", __func__,
pipe->pipe_ndx, pipe->flags, pipe->put0_need);
return;
}
mutex_lock(&iommu_mutex);
mixer = pipe->mixer_num;
iom = &pipe->iommu;
pr_debug("%s: mixer=%d ndx=%d all=%d\n", __func__,
mixer, pipe->pipe_ndx, all);
for (plane = 0; plane < MDP4_MAX_PLANE; plane++) {
if (iom->prev_ihdl[plane]) {
mdp4_overlay_iommu_2freelist(mixer,
iom->prev_ihdl[plane]);
iom->prev_ihdl[plane] = NULL;
}
if (all && iom->ihdl[plane]) {
mdp4_overlay_iommu_2freelist(mixer, iom->ihdl[plane]);
iom->ihdl[plane] = NULL;
}
}
mutex_unlock(&iommu_mutex);
}
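/*
 * Example (a sketch added for clarity, not part of the original driver):
 * a typical commit path on one mixer is expected to call the freelist
 * functions above from the same display-update thread, in this order:
 *
 *	mdp4_overlay_iommu_unmap_freelist(mixer);	unmaps handles retired two commits earlier
 *	mdp4_overlay_iommu_map_buf(...);		maps new buffers, queues prev_ihdl
 *	mdp4_overlay_iommu_pipe_free(pipe_ndx, 0);	moves retired handles to the freelist
 *
 * Because all three run on the same thread and in this order, the
 * current/previous freelists never race each other and only iommu_mutex
 * around the list manipulation itself is required.
 */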
int mdp4_overlay_iommu_map_buf(int mem_id,
struct mdp4_overlay_pipe *pipe, unsigned int plane,
unsigned long *start, unsigned long *len,
struct ion_handle **srcp_ihdl)
{
struct mdp4_iommu_pipe_info *iom;
if (!display_iclient)
return -EINVAL;
*srcp_ihdl = ion_import_dma_buf(display_iclient, mem_id);
if (IS_ERR_OR_NULL(*srcp_ihdl)) {
pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*srcp_ihdl);
}
pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl,
ion_share_dma_buf(display_iclient, *srcp_ihdl));
pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
pipe->pipe_ndx, plane);
if (ion_map_iommu(display_iclient, *srcp_ihdl,
DISPLAY_READ_DOMAIN, GEN_POOL, SZ_4K, 0, start,
len, 0, 0)) {
ion_free(display_iclient, *srcp_ihdl);
pr_err("ion_map_iommu() failed\n");
return -EINVAL;
}
mutex_lock(&iommu_mutex);
iom = &pipe->iommu;
if (iom->prev_ihdl[plane]) {
mdp4_overlay_iommu_2freelist(pipe->mixer_num,
iom->prev_ihdl[plane]);
mdp4_stat.iommu_drop++;
pr_err("%s: dropped, ndx=%d plane=%d\n", __func__,
pipe->pipe_ndx, plane);
}
iom->prev_ihdl[plane] = iom->ihdl[plane];
iom->ihdl[plane] = *srcp_ihdl;
mdp4_stat.iommu_map++;
pr_debug("%s: ndx=%d plane=%d prev=0x%p cur=0x%p start=0x%lx len=%lx\n",
__func__, pipe->pipe_ndx, plane, iom->prev_ihdl[plane],
iom->ihdl[plane], *start, *len);
mutex_unlock(&iommu_mutex);
return 0;
}
static struct mdp4_iommu_pipe_info mdp_iommu[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe)
{
struct mdp4_iommu_pipe_info *iom_pipe_info;
unsigned char i, j;
if (!display_iclient)
return;
for (j = 0; j < OVERLAY_PIPE_MAX; j++) {
iom_pipe_info = &mdp_iommu[pipe->mixer_num][j];
for (i = 0; i < MDP4_MAX_PLANE; i++) {
if (iom_pipe_info->prev_ihdl[i]) {
pr_debug("%s(): mixer %u, pipe %u, plane %u, "
"prev_ihdl %p\n", __func__,
pipe->mixer_num, j + 1, i,
iom_pipe_info->prev_ihdl[i]);
ion_unmap_iommu(display_iclient,
iom_pipe_info->prev_ihdl[i],
DISPLAY_READ_DOMAIN, GEN_POOL);
ion_free(display_iclient,
iom_pipe_info->prev_ihdl[i]);
iom_pipe_info->prev_ihdl[i] = NULL;
}
if (iom_pipe_info->mark_unmap) {
if (iom_pipe_info->ihdl[i]) {
pr_debug("%s(): MARK, mixer %u, pipe %u, plane %u, "
"ihdl %p\n", __func__,
pipe->mixer_num, j + 1, i,
iom_pipe_info->ihdl[i]);
ion_unmap_iommu(display_iclient,
iom_pipe_info->ihdl[i],
DISPLAY_READ_DOMAIN, GEN_POOL);
ion_free(display_iclient,
iom_pipe_info->ihdl[i]);
iom_pipe_info->ihdl[i] = NULL;
}
}
}
iom_pipe_info->mark_unmap = 0;
}
}
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
static int panel_rotate_180 = 1;
#endif
int mdp4_overlay_mixer_play(int mixer_num)
{
if (mixer_num == MDP4_MIXER2)
return ctrl->mixer2_played;
else if (mixer_num == MDP4_MIXER1)
return ctrl->mixer1_played;
else
return ctrl->mixer0_played;
}
void mdp4_overlay_panel_3d(int mixer_num, uint32 panel_3d)
{
ctrl->panel_3d = panel_3d;
}
void mdp4_overlay_panel_mode(int mixer_num, uint32 mode)
{
ctrl->panel_mode |= mode;
}
void mdp4_overlay_panel_mode_unset(int mixer_num, uint32 mode)
{
ctrl->panel_mode &= ~mode;
}
uint32 mdp4_overlay_panel_list(void)
{
return ctrl->panel_mode;
}
int mdp4_overlay_borderfill_supported(void)
{
return (mdp_rev >= MDP_REV_42);
}
void mdp4_overlay_dmae_cfg(struct msm_fb_data_type *mfd, int atv)
{
uint32 dmae_cfg_reg;
if (atv)
dmae_cfg_reg = DMA_DEFLKR_EN;
else
dmae_cfg_reg = 0;
if (mfd->fb_imgType == MDP_BGR_565)
dmae_cfg_reg |= DMA_PACK_PATTERN_BGR;
else
dmae_cfg_reg |= DMA_PACK_PATTERN_RGB;
if (mfd->panel_info.bpp == 18) {
dmae_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
} else if (mfd->panel_info.bpp == 16) {
dmae_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
} else {
dmae_cfg_reg |= DMA_DSTC0G_8BITS | /* 888 24BPP */
DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* dma2 config register */
MDP_OUTP(MDP_BASE + 0xb0000, dmae_cfg_reg);
if (atv) {
MDP_OUTP(MDP_BASE + 0xb0070, 0xeb0010);
MDP_OUTP(MDP_BASE + 0xb0074, 0xf00010);
MDP_OUTP(MDP_BASE + 0xb0078, 0xf00010);
MDP_OUTP(MDP_BASE + 0xb3000, 0x80);
MDP_OUTP(MDP_BASE + 0xb3010, 0x1800040);
MDP_OUTP(MDP_BASE + 0xb3014, 0x1000080);
MDP_OUTP(MDP_BASE + 0xb4004, 0x67686970);
} else {
mdp_vid_quant_set();
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
#ifdef CONFIG_FB_MSM_HDMI_3D
void unfill_black_screen(void) { return; }
#else
void unfill_black_screen(void)
{
uint32 temp_src_format;
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/*
* VG2 Constant Color
*/
temp_src_format = inpdw(MDP_BASE + 0x30050);
MDP_OUTP(MDP_BASE + 0x30050, temp_src_format&(~BIT(22)));
/*
* MDP_OVERLAY_REG_FLUSH
*/
MDP_OUTP(MDP_BASE + 0x18000, BIT(3));
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
return;
}
#endif
#ifdef CONFIG_FB_MSM_HDMI_3D
void fill_black_screen(void) { return; }
#else
void fill_black_screen(void)
{
/*Black color*/
uint32 color = 0x00000000;
uint32 temp_src_format;
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/*
* VG2 Constant Color
*/
MDP_OUTP(MDP_BASE + 0x31008, color);
/*
* MDP_VG2_SRC_FORMAT
*/
temp_src_format = inpdw(MDP_BASE + 0x30050);
MDP_OUTP(MDP_BASE + 0x30050, temp_src_format | BIT(22));
/*
* MDP_OVERLAY_REG_FLUSH
*/
MDP_OUTP(MDP_BASE + 0x18000, BIT(3));
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
return;
}
#endif
void mdp4_overlay_dmae_xy(struct mdp4_overlay_pipe *pipe)
{
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
MDP_OUTP(MDP_BASE + 0xb0004,
(pipe->src_height << 16 | pipe->src_width));
if (pipe->dma_blt_addr) {
uint32 off, bpp;
#ifdef BLT_RGB565
bpp = 2; /* overlay output is RGB565 */
#else
bpp = 3; /* overlay output is RGB888 */
#endif
off = 0;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
MDP_OUTP(MDP_BASE + 0xb0008, pipe->dma_blt_addr + off);
/* RGB888, output of overlay blending */
MDP_OUTP(MDP_BASE + 0xb000c, pipe->src_width * bpp);
} else {
/* dma_e source */
MDP_OUTP(MDP_BASE + 0xb0008, pipe->srcp0_addr);
MDP_OUTP(MDP_BASE + 0xb000c, pipe->srcp0_ystride);
}
/* dma_e dest */
MDP_OUTP(MDP_BASE + 0xb0010, (pipe->dst_y << 16 | pipe->dst_x));
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc)
{
uint32 dma2_cfg_reg;
uint32 mask, curr;
dma2_cfg_reg = DMA_DITHER_EN;
#ifdef BLT_RGB565
/* RGB888 is 0 */
dma2_cfg_reg |= DMA_BUF_FORMAT_RGB565; /* blt only */
#endif
if (mfd->fb_imgType == MDP_BGR_565)
dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
else
dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
if ((mfd->panel_info.type == MIPI_CMD_PANEL) ||
(mfd->panel_info.type == MIPI_VIDEO_PANEL)) {
dma2_cfg_reg |= DMA_DSTC0G_8BITS | /* 888 24BPP */
DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
} else if (mfd->panel_info.bpp == 18) {
dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
} else if (mfd->panel_info.bpp == 16) {
dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
} else {
dma2_cfg_reg |= DMA_DSTC0G_8BITS | /* 888 24BPP */
DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#ifndef CONFIG_FB_MSM_LCDC_CHIMEI_WXGA_PANEL
if (lcdc)
dma2_cfg_reg |= DMA_PACK_ALIGN_MSB;
#endif
/* dma2 config register */
curr = inpdw(MDP_BASE + 0x90000);
mask = 0x0FFFFFFF;
dma2_cfg_reg = (dma2_cfg_reg & mask) | (curr & ~mask);
MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
ctrl->dmap_cfg[0] = dma2_cfg_reg;
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/*
* mdp4_overlay_dmap_xy: called from the base layer only
*/
void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe)
{
uint32 off, bpp;
if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
if (pipe->dma_blt_addr) {
#ifdef BLT_RGB565
bpp = 2; /* overlay output is RGB565 */
#else
bpp = 3; /* overlay output is RGB888 */
#endif
off = 0;
if (pipe->dmap_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
ctrl->dmap_cfg[2] = pipe->dma_blt_addr + off;
MDP_OUTP(MDP_BASE + 0x90008, pipe->dma_blt_addr + off);
/* RGB888, output of overlay blending */
MDP_OUTP(MDP_BASE + 0x9000c, pipe->src_width * bpp);
ctrl->dmap_cfg[3] = pipe->src_width * bpp;
} else {
MDP_OUTP(MDP_BASE + 0x90008, pipe->srcp0_addr);
ctrl->dmap_cfg[2] = pipe->srcp0_addr;
MDP_OUTP(MDP_BASE + 0x9000c, pipe->srcp0_ystride);
ctrl->dmap_cfg[3] = pipe->srcp0_ystride;
}
/* dma_p source */
MDP_OUTP(MDP_BASE + 0x90004,
(pipe->src_height << 16 | pipe->src_width));
ctrl->dmap_cfg[1] = (pipe->src_height << 16 | pipe->src_width);
/* dma_p dest */
MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
ctrl->dmap_cfg[4] = (pipe->dst_y << 16 | pipe->dst_x);
if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
static void mdp4_overlay_dmap_reconfig(void)
{
MDP_OUTP(MDP_BASE + 0x90000, ctrl->dmap_cfg[0]);
MDP_OUTP(MDP_BASE + 0x90004, ctrl->dmap_cfg[1]);
MDP_OUTP(MDP_BASE + 0x90008, ctrl->dmap_cfg[2]);
MDP_OUTP(MDP_BASE + 0x9000c, ctrl->dmap_cfg[3]);
MDP_OUTP(MDP_BASE + 0x90010, ctrl->dmap_cfg[4]);
}
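/*
 * Note (added for clarity): dmap_cfg[0..4] shadow the DMA_P registers
 * written above (0x90000 config, 0x90004 source size, 0x90008 address,
 * 0x9000c ystride, 0x90010 dst xy) so that mdp4_mixer_reset() can
 * reprogram DMA_P through mdp4_overlay_dmap_reconfig() after a sw reset.
 */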
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
#define MDP4_VG_PHASE_STEP_SHIFT 29
static int mdp4_leading_0(uint32 num)
{
uint32 bit = 0x80000000;
int i;
for (i = 0; i < 32; i++) {
if (bit & num)
return i;
bit >>= 1;
}
return i;
}
static uint32 mdp4_scale_phase_step(int f_num, uint32 src, uint32 dst)
{
uint32 val, s;
int n;
n = mdp4_leading_0(src);
if (n > f_num)
n = f_num;
s = src << n; /* shift up as far as possible to reduce loss of resolution */
val = s / dst;
if (n < f_num) {
n = f_num - n;
val <<= n;
val |= ((s % dst) << n) / dst;
}
return val;
}
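/*
 * Worked example (illustrative only, not from the original source):
 * with f_num = 29, src = 720 and dst = 1280 the step is
 * src / dst = 0.5625 expressed with 29 fractional bits.
 * mdp4_leading_0(720) = 22, so s = 720 << 22, val = s / 1280 = 2359296,
 * and the remaining shift of 29 - 22 = 7 gives
 * val = 2359296 << 7 = 0x12000000, i.e. 0.5625 * (1 << 29).
 * MDP4_VG_PHASE_STEP_DEFAULT (0x20000000) is simply 1.0 in this format.
 */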
static void mdp4_scale_setup(struct mdp4_overlay_pipe *pipe)
{
pipe->phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
pipe->phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
if (pipe->dst_h && pipe->src_h != pipe->dst_h) {
u32 upscale_max;
upscale_max = (mdp_rev >= MDP_REV_41) ?
MDP4_REV41_OR_LATER_UP_SCALING_MAX :
MDP4_REV40_UP_SCALING_MAX;
if (pipe->dst_h > pipe->src_h * upscale_max)
return;
pipe->op_mode |= MDP4_OP_SCALEY_EN;
if (pipe->pipe_type == OVERLAY_TYPE_VIDEO) {
if (pipe->flags & MDP_BACKEND_COMPOSITION &&
pipe->alpha_enable && pipe->dst_h > pipe->src_h)
pipe->op_mode |= MDP4_OP_SCALEY_PIXEL_RPT;
else if (pipe->dst_h <= (pipe->src_h / 4))
pipe->op_mode |= MDP4_OP_SCALEY_MN_PHASE;
else
pipe->op_mode |= MDP4_OP_SCALEY_FIR;
} else { /* RGB pipe */
pipe->op_mode |= MDP4_OP_SCALE_RGB_ENHANCED |
MDP4_OP_SCALE_RGB_BILINEAR |
MDP4_OP_SCALE_ALPHA_BILINEAR;
}
pipe->phasey_step = mdp4_scale_phase_step(29,
pipe->src_h, pipe->dst_h);
}
if (pipe->dst_w && pipe->src_w != pipe->dst_w) {
u32 upscale_max;
upscale_max = (mdp_rev >= MDP_REV_41) ?
MDP4_REV41_OR_LATER_UP_SCALING_MAX :
MDP4_REV40_UP_SCALING_MAX;
if (pipe->dst_w > pipe->src_w * upscale_max)
return;
pipe->op_mode |= MDP4_OP_SCALEX_EN;
if (pipe->pipe_type == OVERLAY_TYPE_VIDEO) {
if (pipe->flags & MDP_BACKEND_COMPOSITION &&
pipe->alpha_enable && pipe->dst_w > pipe->src_w)
pipe->op_mode |= MDP4_OP_SCALEX_PIXEL_RPT;
else if (pipe->dst_w <= (pipe->src_w / 4))
pipe->op_mode |= MDP4_OP_SCALEX_MN_PHASE;
else
pipe->op_mode |= MDP4_OP_SCALEX_FIR;
} else { /* RGB pipe */
pipe->op_mode |= MDP4_OP_SCALE_RGB_ENHANCED |
MDP4_OP_SCALE_RGB_BILINEAR |
MDP4_OP_SCALE_ALPHA_BILINEAR;
}
pipe->phasex_step = mdp4_scale_phase_step(29,
pipe->src_w, pipe->dst_w);
}
}
void mdp4_overlay_solidfill_init(struct mdp4_overlay_pipe *pipe)
{
char *base;
uint32 src_size, src_xy, dst_size, dst_xy;
uint32 format;
uint32 off;
int i;
src_size = ((pipe->src_h << 16) | pipe->src_w);
src_xy = ((pipe->src_y << 16) | pipe->src_x);
dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
base = MDP_BASE + MDP4_VIDEO_BASE;
off = MDP4_VIDEO_OFF; /* 0x10000 */
mdp_clk_ctrl(1);
for (i = 0; i < 4; i++) { /* 4 pipes */
format = inpdw(base + 0x50);
format |= MDP4_FORMAT_SOLID_FILL;
outpdw(base + 0x0000, src_size);/* MDP_RGB_SRC_SIZE */
outpdw(base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
outpdw(base + 0x0008, dst_size);/* MDP_RGB_DST_SIZE */
outpdw(base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
outpdw(base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
outpdw(base + 0x1008, 0x0);/* Black */
base += off;
}
/*
* keep it at primary
* will be picked up at first commit
*/
ctrl->flush[MDP4_MIXER0] = 0x3c; /* all pipes */
mdp_clk_ctrl(0);
}
void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe)
{
char *rgb_base;
uint32 src_size, src_xy, dst_size, dst_xy;
uint32 format, pattern;
uint32 curr, mask;
uint32 offset = 0;
int pnum;
pnum = pipe->pipe_num - OVERLAY_PIPE_RGB1; /* start from 0 */
rgb_base = MDP_BASE + MDP4_RGB_BASE;
rgb_base += (MDP4_RGB_OFF * pnum);
src_size = ((pipe->src_h << 16) | pipe->src_w);
src_xy = ((pipe->src_y << 16) | pipe->src_x);
dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
if ((pipe->src_x + pipe->src_w) > 0x7FF) {
offset += pipe->src_x * pipe->bpp;
src_xy &= 0xFFFF0000;
}
if ((pipe->src_y + pipe->src_h) > 0x7FF) {
offset += pipe->src_y * pipe->src_width * pipe->bpp;
src_xy &= 0x0000FFFF;
}
format = mdp4_overlay_format(pipe);
pattern = mdp4_overlay_unpack_pattern(pipe);
#ifdef MDP4_IGC_LUT_ENABLE
pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
#endif
mdp4_scale_setup(pipe);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* Ensure the proper convert matrix is loaded when the color space swaps */
curr = inpdw(rgb_base + 0x0058);
/* Don't touch bits you don't want to configure*/
mask = 0xFFFEFFFF;
pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
//2012-11-20 taewonee.kim@lge.com : QCT pre patch for the inverted clone image [START]
if((pipe->mfd->panel_info.type != DTV_PANEL)&&(pipe->mfd->panel_info.type != WRITEBACK_PANEL))
//2012-11-20 taewonee.kim@lge.com : QCT pre patch for the inverted clone image [END]
{
if (panel_rotate_180 && (pipe->pipe_num == OVERLAY_PIPE_RGB1 || pipe->pipe_num == OVERLAY_PIPE_RGB2))
{
uint32 op_mode = pipe->op_mode | MDP4_OP_FLIP_UD | MDP4_OP_SCALEY_EN;
if (pipe->ext_flag & MDP_FLIP_UD)
op_mode &= ~MDP4_OP_FLIP_UD;
pipe->op_mode = op_mode;
}
if ((pipe->op_mode & MDP4_OP_FLIP_UD) && pipe->mfd)
dst_xy = (((pipe->mfd->panel_info.yres - pipe->dst_y - pipe->dst_h) << 16) | pipe->dst_x);
}
if (!pipe->mfd)
pr_err("rgb mfd is not set\n");
#endif
outpdw(rgb_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
outpdw(rgb_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
outpdw(rgb_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
outpdw(rgb_base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
outpdw(rgb_base + 0x0010, pipe->srcp0_addr + offset);
outpdw(rgb_base + 0x0040, pipe->srcp0_ystride);
outpdw(rgb_base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
outpdw(rgb_base + 0x0054, pattern);/* MDP_RGB_SRC_UNPACK_PATTERN */
if (format & MDP4_FORMAT_SOLID_FILL) {
u32 op_mode = pipe->op_mode;
op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
outpdw(rgb_base + 0x0058, op_mode);/* MDP_RGB_OP_MODE */
} else {
if (pipe->op_mode & MDP4_OP_FLIP_LR && mdp_rev >= MDP_REV_42) {
/* Enable x-scaling bit to enable LR flip */
/* for MDP > 4.2 targets */
pipe->op_mode |= 0x01;
}
outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
}
outpdw(rgb_base + 0x005c, pipe->phasex_step);
outpdw(rgb_base + 0x0060, pipe->phasey_step);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mdp4_stat.pipe[pipe->pipe_num]++;
}
static void mdp4_overlay_vg_get_src_offset(struct mdp4_overlay_pipe *pipe,
char *vg_base, uint32 *luma_off, uint32 *chroma_off)
{
uint32 src_xy;
*luma_off = 0;
*chroma_off = 0;
if (pipe->src_x && (pipe->frame_format ==
MDP4_FRAME_FORMAT_LINEAR)) {
src_xy = (pipe->src_y << 16) | pipe->src_x;
src_xy &= 0xffff0000;
outpdw(vg_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
switch (pipe->src_format) {
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CB_CR_H2V2:
*luma_off = pipe->src_x;
*chroma_off = pipe->src_x/2;
break;
case MDP_Y_CBCR_H2V2_TILE:
case MDP_Y_CRCB_H2V2_TILE:
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CRCB_H1V1:
case MDP_Y_CBCR_H1V1:
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H1V2:
case MDP_Y_CBCR_H1V2:
*luma_off = pipe->src_x +
(pipe->src_y * pipe->srcp0_ystride);
*chroma_off = pipe->src_x +
(pipe->src_y * pipe->srcp1_ystride);
break;
case MDP_YCBYCR_H2V1:
case MDP_YCRYCB_H2V1:
if (pipe->src_x & 0x1)
pipe->src_x += 1;
*luma_off += pipe->src_x * 2;
break;
case MDP_ARGB_8888:
case MDP_RGBA_8888:
case MDP_BGRA_8888:
case MDP_RGBX_8888:
case MDP_RGB_565:
case MDP_BGR_565:
case MDP_XRGB_8888:
case MDP_RGB_888:
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
*luma_off = pipe->src_x * pipe->bpp;
break;
default:
pr_err("%s: fmt %u not supported for adjustment\n",
__func__, pipe->src_format);
break;
}
}
}
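/*
 * Example (illustrative numbers only): for MDP_Y_CRCB_H2V2 with
 * src_x = 8, src_y = 4 and srcp0_ystride = srcp1_ystride = 1280, the
 * function above zeroes the x field of SRC_XY and instead returns
 * luma_off = 8 + 4 * 1280 = 5128 and chroma_off = 8 + 4 * 1280 = 5128,
 * which mdp4_overlay_vg_setup() then adds to the plane base addresses.
 */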
void mdp4_overlay_vg_setup(struct mdp4_overlay_pipe *pipe)
{
char *vg_base;
uint32 frame_size, src_size, src_xy, dst_size, dst_xy;
uint32 format, pattern, luma_offset, chroma_offset;
/* 2012-11-29 wonhee.jeong@lge.com: this code was added for MDP tuning when starting DMB on G, GK (apq8064) [S] */
/* This source code was confirmed by QCT */
uint32 mask, curr, addr;
/* 2012-11-29 wonhee.jeong@lge.com: this code was added for MDP tuning when starting DMB on G, GK (apq8064) [E] */
int pnum, ptype, i;
uint32_t block;
pnum = pipe->pipe_num - OVERLAY_PIPE_VG1; /* start from 0 */
vg_base = MDP_BASE + MDP4_VIDEO_BASE;
vg_base += (MDP4_VIDEO_OFF * pnum);
frame_size = ((pipe->src_height << 16) | pipe->src_width);
src_size = ((pipe->src_h << 16) | pipe->src_w);
src_xy = ((pipe->src_y << 16) | pipe->src_x);
dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
ptype = mdp4_overlay_format2type(pipe->src_format);
format = mdp4_overlay_format(pipe);
pattern = mdp4_overlay_unpack_pattern(pipe);
/* CSC Post Processing enabled? */
if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG) {
if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_ENABLE)
pipe->op_mode |= MDP4_OP_CSC_EN;
if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_IN)
pipe->op_mode |= MDP4_OP_SRC_DATA_YCBCR;
if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_OUT)
pipe->op_mode |= MDP4_OP_DST_DATA_YCBCR;
mdp4_csc_write(&pipe->pp_cfg.csc_cfg,
(uint32_t) (vg_base + MDP4_VIDEO_CSC_OFF));
if (pipe->pipe_num == OVERLAY_PIPE_VG1)
block = MDP_BLOCK_VG_1;
else
block = MDP_BLOCK_VG_2;
for (i = 0; i < CSC_MAX_BLOCKS; i++) {
if (block == csc_cfg_matrix[i].block) {
memcpy(&csc_cfg_matrix[i].csc_data,
&(pipe->pp_cfg.csc_cfg),
sizeof(struct mdp_csc_cfg));
break;
}
}
}
if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_QSEED_CFG) {
mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[0],
(uint32_t) vg_base);
mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[1],
(uint32_t) vg_base);
}
}
/* not RGB on a VG pipe, i.e. a pure VG (YUV) pipe */
if (ptype != OVERLAY_TYPE_RGB)
pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR);
#ifdef MDP4_IGC_LUT_ENABLE
pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
#endif
mdp4_scale_setup(pipe);
luma_offset = 0;
chroma_offset = 0;
if (ptype == OVERLAY_TYPE_RGB) {
if ((pipe->src_y + pipe->src_h) > 0x7FF) {
luma_offset = pipe->src_y * pipe->src_width * pipe->bpp;
src_xy &= 0x0000FFFF;
}
if ((pipe->src_x + pipe->src_w) > 0x7FF) {
luma_offset += pipe->src_x * pipe->bpp;
src_xy &= 0xFFFF0000;
}
}
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
//2012-11-20 taewonee.kim@lge.com : QCT pre patch for the inverted clone image [START]
if((pipe->mfd->panel_info.type != DTV_PANEL) && (pipe->mfd->panel_info.type != WRITEBACK_PANEL))
//2012-11-20 taewonee.kim@lge.com : QCT pre patch for the inverted clone image [END]
{
if (panel_rotate_180)
{
uint32 op_mode = pipe->op_mode | MDP4_OP_FLIP_UD;
if (pipe->ext_flag & MDP_FLIP_UD)
op_mode &= ~MDP4_OP_FLIP_UD;
pipe->op_mode = op_mode;
}
if ((pipe->op_mode & MDP4_OP_FLIP_UD) && pipe->mfd)
{
dst_xy = (((pipe->mfd->panel_info.yres - pipe->dst_y - pipe->dst_h) << 16) | pipe->dst_x);
outpdw(MDP_BASE + 0xE0044, 0xe0fff);
}
}
if (!pipe->mfd)
pr_err("vg mfd is not set\n");
#endif
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
outpdw(vg_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
outpdw(vg_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
outpdw(vg_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
outpdw(vg_base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
if (pipe->frame_format != MDP4_FRAME_FORMAT_LINEAR) {
struct mdp4_overlay_pipe *real_pipe;
u32 psize, csize;
/*
* The video tile frame size register is NOT double buffered:
* when this register is updated, it takes effect immediately.
* During a transition from a smaller to a higher resolution, MDP
* may still fetch from the smaller-resolution buffer while the new,
* higher-resolution frame size is already programmed. This would
* cause an iommu page fault.
*/
real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
psize = real_pipe->prev_src_height * real_pipe->prev_src_width;
csize = pipe->src_height * pipe->src_width;
if (psize && (csize > psize)) {
frame_size = (real_pipe->prev_src_height << 16 |
real_pipe->prev_src_width);
}
outpdw(vg_base + 0x0048, frame_size); /* TILE frame size */
real_pipe->prev_src_height = pipe->src_height;
real_pipe->prev_src_width = pipe->src_width;
}
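/*
 * Example of the hazard handled above (illustrative numbers): when a
 * tiled layer switches from 640x480 (psize = 307200) to 1920x1080
 * (csize = 2073600), csize > psize, so this commit still programs the
 * previous 640x480 frame size while MDP may be fetching from the old
 * buffer; the new size is written on the next commit, after
 * prev_src_height/prev_src_width have been updated.
 */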
/*
* Adjust the src X offset to prevent MDP from overfetching pixels
* present before the offset. This is required for video
* frames that come with unused green pixels along the left margin.
*/
/* not RGB on a VG pipe, i.e. a pure VG (YUV) pipe */
if (ptype != OVERLAY_TYPE_RGB) {
mdp4_overlay_vg_get_src_offset(pipe, vg_base, &luma_offset,
&chroma_offset);
}
/* 2012-11-29 wonhee.jeong@lge.com: this code was added for MDP tuning when starting DMB on G, GK (apq8064) [S] */
/* This source code was confirmed by QCT */
/* Ensure the proper convert matrix is loaded when the color space swaps */
curr = inpdw(vg_base + 0x0058);
mask = 0x600;
if ((curr & mask) != (pipe->op_mode & mask)) {
addr = ((uint32_t)vg_base) + 0x4000;
if (ptype != OVERLAY_TYPE_RGB)
mdp4_csc_write(&(mdp_csc_convert[1]), addr);
else
mdp4_csc_write(&(mdp_csc_convert[0]), addr);
mask = 0xFFFCFFFF;
} else {
/* Don't touch bits you don't want to configure*/
mask = 0xFFFCF1FF;
}
pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
/* 2012-11-29 wonhee.jeong@lge.com: this code was added for MDP tuning when starting DMB on G, GK (apq8064) [E] */
/* luma component plane */
outpdw(vg_base + 0x0010, pipe->srcp0_addr + luma_offset);
/* chroma component plane or planar color 1 */
outpdw(vg_base + 0x0014, pipe->srcp1_addr + chroma_offset);
/* planar color 2 */
outpdw(vg_base + 0x0018, pipe->srcp2_addr + chroma_offset);
outpdw(vg_base + 0x0040,
pipe->srcp1_ystride << 16 | pipe->srcp0_ystride);
outpdw(vg_base + 0x0044,
pipe->srcp3_ystride << 16 | pipe->srcp2_ystride);
outpdw(vg_base + 0x0050, format); /* MDP_RGB_SRC_FORMAT */
outpdw(vg_base + 0x0054, pattern); /* MDP_RGB_SRC_UNPACK_PATTERN */
if (format & MDP4_FORMAT_SOLID_FILL) {
u32 op_mode = pipe->op_mode;
op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
outpdw(vg_base + 0x0058, op_mode);/* MDP_RGB_OP_MODE */
} else
outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
outpdw(vg_base + 0x005c, pipe->phasex_step);
outpdw(vg_base + 0x0060, pipe->phasey_step);
if (pipe->op_mode & MDP4_OP_DITHER_EN) {
outpdw(vg_base + 0x0068,
pipe->r_bit << 4 | pipe->b_bit << 2 | pipe->g_bit);
}
if (mdp_rev > MDP_REV_41) {
/* mdp chip select controller */
mask = 0;
if (pipe->pipe_num == OVERLAY_PIPE_VG1)
mask = 0x020; /* bit 5 */
else if (pipe->pipe_num == OVERLAY_PIPE_VG2)
mask = 0x02000; /* bit 13 */
if (mask) {
if (pipe->op_mode & MDP4_OP_SCALEY_MN_PHASE)
ctrl->cs_controller &= ~mask;
else
ctrl->cs_controller |= mask;
/* NOT double buffered */
outpdw(MDP_BASE + 0x00c0, ctrl->cs_controller);
}
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mdp4_stat.pipe[pipe->pipe_num]++;
}
int mdp4_overlay_format2type(uint32 format)
{
switch (format) {
case MDP_RGB_565:
case MDP_RGB_888:
case MDP_BGR_565:
case MDP_XRGB_8888:
case MDP_ARGB_8888:
case MDP_RGBA_8888:
case MDP_BGRA_8888:
case MDP_RGBX_8888:
return OVERLAY_TYPE_RGB;
case MDP_YCBYCR_H2V1:
case MDP_YCRYCB_H2V1:
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H1V2:
case MDP_Y_CBCR_H1V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V2_TILE:
case MDP_Y_CRCB_H2V2_TILE:
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CB_CR_H2V2:
case MDP_Y_CRCB_H1V1:
case MDP_Y_CBCR_H1V1:
case MDP_YCRCB_H1V1:
case MDP_YCBCR_H1V1:
return OVERLAY_TYPE_VIDEO;
case MDP_RGB_BORDERFILL:
return OVERLAY_TYPE_BF;
default:
mdp4_stat.err_format++;
return -ERANGE;
}
}
#define C3_ALPHA 3 /* alpha */
#define C2_R_Cr 2 /* R/Cr */
#define C1_B_Cb 1 /* B/Cb */
#define C0_G_Y 0 /* G/luma */
#define YUV_444_MAX_WIDTH 1280 /* Max width for YUV 444*/
int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe)
{
switch (pipe->src_format) {
case MDP_RGB_565:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0;
pipe->r_bit = 1; /* R, 5 bits */
pipe->b_bit = 1; /* B, 5 bits */
pipe->g_bit = 2; /* G, 6 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 2;
pipe->element2 = C2_R_Cr; /* R */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C1_B_Cb; /* B */
pipe->bpp = 2; /* 2 bpp */
pipe->chroma_sample = MDP4_CHROMA_RGB;
break;
case MDP_RGB_888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 2;
pipe->element2 = C1_B_Cb; /* B */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C2_R_Cr; /* R */
pipe->bpp = 3; /* 3 bpp */
pipe->chroma_sample = MDP4_CHROMA_RGB;
break;
case MDP_BGR_565:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0;
pipe->r_bit = 1; /* R, 5 bits */
pipe->b_bit = 1; /* B, 5 bits */
pipe->g_bit = 2; /* G, 6 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 2;
pipe->element2 = C1_B_Cb; /* B */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C2_R_Cr; /* R */
pipe->bpp = 2; /* 2 bpp */
pipe->chroma_sample = MDP4_CHROMA_RGB;
break;
case MDP_XRGB_8888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 3; /* alpha, 8 bits */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C1_B_Cb; /* B */
pipe->element2 = C0_G_Y; /* G */
pipe->element1 = C2_R_Cr; /* R */
pipe->element0 = C3_ALPHA; /* alpha */
pipe->bpp = 4; /* 4 bpp */
pipe->chroma_sample = MDP4_CHROMA_RGB;
break;
case MDP_ARGB_8888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 3; /* alpha, 8 bits */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 1;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C1_B_Cb; /* B */
pipe->element2 = C0_G_Y; /* G */
pipe->element1 = C2_R_Cr; /* R */
pipe->element0 = C3_ALPHA; /* alpha */
pipe->bpp = 4; /* 4 bpp */
pipe->chroma_sample = MDP4_CHROMA_RGB;
break;
case MDP_RGBA_8888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 3; /* alpha, 8 bits */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 1;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C3_ALPHA; /* alpha */
pipe->element2 = C1_B_Cb; /* B */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C2_R_Cr; /* R */
pipe->bpp = 4; /* 4 bpp */
pipe->chroma_sample = MDP4_CHROMA_RGB;
break;
case MDP_RGBX_8888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 3;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C3_ALPHA; /* alpha */
pipe->element2 = C1_B_Cb; /* B */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C2_R_Cr; /* R */
pipe->bpp = 4; /* 4 bpp */
pipe->chroma_sample = MDP4_CHROMA_RGB;
break;
case MDP_BGRA_8888:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 3; /* alpha, 8 bits */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 1;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
pipe->element3 = C3_ALPHA; /* alpha */
pipe->element2 = C2_R_Cr; /* R */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C1_B_Cb; /* B */
pipe->bpp = 4; /* 4 bpp */
pipe->chroma_sample = MDP4_CHROMA_RGB;
break;
case MDP_YCBYCR_H2V1:
case MDP_YCRYCB_H2V1:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0; /* no alpha */
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 3;
if (pipe->src_format == MDP_YCRYCB_H2V1) {
pipe->element3 = C0_G_Y; /* G */
pipe->element2 = C2_R_Cr; /* R */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C1_B_Cb; /* B */
} else if (pipe->src_format == MDP_YCBYCR_H2V1) {
pipe->element3 = C0_G_Y; /* G */
pipe->element2 = C1_B_Cb; /* B */
pipe->element1 = C0_G_Y; /* G */
pipe->element0 = C2_R_Cr; /* R */
}
pipe->bpp = 2; /* 2 bpp */
pipe->chroma_sample = MDP4_CHROMA_H2V1;
break;
case MDP_Y_CRCB_H2V1:
case MDP_Y_CBCR_H2V1:
case MDP_Y_CRCB_H1V2:
case MDP_Y_CBCR_H1V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CBCR_H2V2:
case MDP_Y_CRCB_H1V1:
case MDP_Y_CBCR_H1V1:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
pipe->a_bit = 0;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 1; /* 2 */
if (pipe->src_format == MDP_Y_CRCB_H2V1) {
pipe->element1 = C1_B_Cb;
pipe->element0 = C2_R_Cr;
pipe->chroma_sample = MDP4_CHROMA_H2V1;
} else if (pipe->src_format == MDP_Y_CRCB_H1V1) {
pipe->element1 = C1_B_Cb;
pipe->element0 = C2_R_Cr;
if (pipe->src_width > YUV_444_MAX_WIDTH)
pipe->chroma_sample = MDP4_CHROMA_H1V2;
else
pipe->chroma_sample = MDP4_CHROMA_RGB;
} else if (pipe->src_format == MDP_Y_CBCR_H2V1) {
pipe->element1 = C2_R_Cr;
pipe->element0 = C1_B_Cb;
pipe->chroma_sample = MDP4_CHROMA_H2V1;
} else if (pipe->src_format == MDP_Y_CBCR_H1V1) {
pipe->element1 = C2_R_Cr;
pipe->element0 = C1_B_Cb;
if (pipe->src_width > YUV_444_MAX_WIDTH)
pipe->chroma_sample = MDP4_CHROMA_H1V2;
else
pipe->chroma_sample = MDP4_CHROMA_RGB;
} else if (pipe->src_format == MDP_Y_CRCB_H1V2) {
pipe->element1 = C1_B_Cb;
pipe->element0 = C2_R_Cr;
pipe->chroma_sample = MDP4_CHROMA_H1V2;
} else if (pipe->src_format == MDP_Y_CBCR_H1V2) {
pipe->element1 = C2_R_Cr;
pipe->element0 = C1_B_Cb;
pipe->chroma_sample = MDP4_CHROMA_H1V2;
} else if (pipe->src_format == MDP_Y_CRCB_H2V2) {
pipe->element1 = C1_B_Cb;
pipe->element0 = C2_R_Cr;
pipe->chroma_sample = MDP4_CHROMA_420;
} else if (pipe->src_format == MDP_Y_CBCR_H2V2) {
pipe->element1 = C2_R_Cr;
pipe->element0 = C1_B_Cb;
pipe->chroma_sample = MDP4_CHROMA_420;
}
pipe->bpp = 2; /* 2 bpp */
break;
case MDP_Y_CBCR_H2V2_TILE:
case MDP_Y_CRCB_H2V2_TILE:
pipe->frame_format = MDP4_FRAME_FORMAT_VIDEO_SUPERTILE;
pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
pipe->a_bit = 0;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 1; /* 2 */
if (pipe->src_format == MDP_Y_CRCB_H2V2_TILE) {
pipe->element1 = C1_B_Cb; /* B */
pipe->element0 = C2_R_Cr; /* R */
pipe->chroma_sample = MDP4_CHROMA_420;
} else if (pipe->src_format == MDP_Y_CBCR_H2V2_TILE) {
pipe->element1 = C2_R_Cr; /* R */
pipe->element0 = C1_B_Cb; /* B */
pipe->chroma_sample = MDP4_CHROMA_420;
}
pipe->bpp = 2; /* 2 bpp */
break;
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CB_CR_H2V2:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_PLANAR;
pipe->a_bit = 0;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->chroma_sample = MDP4_CHROMA_420;
pipe->bpp = 2; /* 2 bpp */
break;
case MDP_YCBCR_H1V1:
case MDP_YCRCB_H1V1:
pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
pipe->a_bit = 0;
pipe->r_bit = 3; /* R, 8 bits */
pipe->b_bit = 3; /* B, 8 bits */
pipe->g_bit = 3; /* G, 8 bits */
pipe->alpha_enable = 0;
pipe->unpack_tight = 1;
pipe->unpack_align_msb = 0;
pipe->unpack_count = 2;
pipe->element0 = C0_G_Y; /* G */
if (pipe->src_format == MDP_YCRCB_H1V1) {
pipe->element1 = C2_R_Cr; /* R */
pipe->element2 = C1_B_Cb; /* B */
} else {
pipe->element1 = C1_B_Cb; /* B */
pipe->element2 = C2_R_Cr; /* R */
}
pipe->bpp = 3; /* 3 bpp */
break;
case MDP_RGB_BORDERFILL:
pipe->alpha_enable = 0;
pipe->alpha = 0;
break;
default:
/* not likely */
mdp4_stat.err_format++;
return -ERANGE;
}
return 0;
}
/*
* color_key_convert: returns a 12-bit color key component
*/
static uint32 color_key_convert(int start, int num, uint32 color)
{
uint32 data;
data = (color >> start) & ((1 << num) - 1);
/* convert to 8 bits */
if (num == 5)
data = ((data << 3) | (data >> 2));
else if (num == 6)
data = ((data << 2) | (data >> 4));
/* convert 8 bits to 12 bits */
data = (data << 4) | (data >> 4);
return data;
}
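/*
 * Worked example (illustrative only): the 5-bit red component 0x10 of
 * an MDP_RGB_565 transparent color expands to 8 bits as
 * (0x10 << 3) | (0x10 >> 2) = 0x84 and then to the 12-bit key
 * (0x84 << 4) | (0x84 >> 4) = 0x848; a full-scale 0x1f becomes 0xff
 * and finally 0xfff.
 */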
void transp_color_key(int format, uint32 transp,
uint32 *c0, uint32 *c1, uint32 *c2)
{
int b_start, g_start, r_start;
int b_num, g_num, r_num;
switch (format) {
case MDP_RGB_565:
b_start = 0;
g_start = 5;
r_start = 11;
r_num = 5;
g_num = 6;
b_num = 5;
break;
case MDP_RGB_888:
case MDP_XRGB_8888:
case MDP_ARGB_8888:
case MDP_BGRA_8888:
b_start = 0;
g_start = 8;
r_start = 16;
r_num = 8;
g_num = 8;
b_num = 8;
break;
case MDP_RGBA_8888:
case MDP_RGBX_8888:
b_start = 16;
g_start = 8;
r_start = 0;
r_num = 8;
g_num = 8;
b_num = 8;
break;
case MDP_BGR_565:
b_start = 11;
g_start = 5;
r_start = 0;
r_num = 5;
g_num = 6;
b_num = 5;
break;
case MDP_Y_CB_CR_H2V2:
case MDP_Y_CBCR_H2V2:
case MDP_Y_CBCR_H2V1:
case MDP_YCBCR_H1V1:
b_start = 8;
g_start = 16;
r_start = 0;
r_num = 8;
g_num = 8;
b_num = 8;
break;
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CRCB_H2V2:
case MDP_Y_CRCB_H2V1:
case MDP_Y_CRCB_H1V2:
case MDP_Y_CBCR_H1V2:
case MDP_Y_CRCB_H1V1:
case MDP_Y_CBCR_H1V1:
case MDP_YCRCB_H1V1:
b_start = 0;
g_start = 16;
r_start = 8;
r_num = 8;
g_num = 8;
b_num = 8;
break;
default:
b_start = 0;
g_start = 8;
r_start = 16;
r_num = 8;
g_num = 8;
b_num = 8;
break;
}
*c0 = color_key_convert(g_start, g_num, transp);
*c1 = color_key_convert(b_start, b_num, transp);
*c2 = color_key_convert(r_start, r_num, transp);
}
uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe)
{
uint32 format;
format = 0;
if (pipe->solid_fill)
format |= MDP4_FORMAT_SOLID_FILL;
if (pipe->unpack_align_msb)
format |= MDP4_FORMAT_UNPACK_ALIGN_MSB;
if (pipe->unpack_tight)
format |= MDP4_FORMAT_UNPACK_TIGHT;
if (pipe->alpha_enable)
format |= MDP4_FORMAT_ALPHA_ENABLE;
if (pipe->flags & MDP_SOURCE_ROTATED_90)
format |= MDP4_FORMAT_90_ROTATED;
format |= (pipe->unpack_count << 13);
format |= ((pipe->bpp - 1) << 9);
format |= (pipe->a_bit << 6);
format |= (pipe->r_bit << 4);
format |= (pipe->b_bit << 2);
format |= pipe->g_bit;
format |= (pipe->frame_format << 29);
/* video/graphic */
format |= (pipe->fetch_plane << 19);
format |= (pipe->chroma_site << 28);
format |= (pipe->chroma_sample << 26);
return format;
}
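/*
 * For reference (derived only from the shifts above, not from a
 * register manual): g_bit sits at bits [1:0], b_bit at [3:2], r_bit at
 * [5:4], a_bit at [7:6], bpp - 1 at bit 9, unpack_count at bit 13,
 * fetch_plane at bit 19, chroma_sample at bit 26, chroma_site at
 * bit 28 and frame_format at bit 29; the MDP4_FORMAT_* flags (solid
 * fill, unpack align/tight, alpha enable, 90-rotated) fill in the
 * remaining bits through their macros.
 */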
uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe)
{
return (pipe->element3 << 24) | (pipe->element2 << 16) |
(pipe->element1 << 8) | pipe->element0;
}
/*
* mdp4_overlayproc_cfg: must only be called from the base layer
*/
void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe)
{
uint32 data, intf;
char *overlay_base;
uint32 curr;
intf = 0;
if (pipe->mixer_num == MDP4_MIXER2)
overlay_base = MDP_BASE + MDP4_OVERLAYPROC2_BASE;
else if (pipe->mixer_num == MDP4_MIXER1) {
overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
intf = inpdw(MDP_BASE + 0x0038); /* MDP_DISP_INTF_SEL */
intf >>= 4;
intf &= 0x03;
} else
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/*
* BLT supports both the primary and the external interface
*/
if (pipe->ov_blt_addr) {
int off, bpp;
#ifdef BLT_RGB565
bpp = 2; /* overlay output is RGB565 */
#else
bpp = 3; /* overlay output is RGB888 */
#endif
data = pipe->src_height;
data <<= 16;
data |= pipe->src_width;
outpdw(overlay_base + 0x0008, data); /* ROI, height + width */
if (pipe->mixer_num == MDP4_MIXER0 ||
pipe->mixer_num == MDP4_MIXER1) {
off = 0;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
/* overlay output is RGB888 */
outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
/* MDDI - BLT + on demand */
outpdw(overlay_base + 0x0004, 0x08);
curr = inpdw(overlay_base + 0x0014);
curr &= 0x4;
#ifdef BLT_RGB565
outpdw(overlay_base + 0x0014, curr | 0x1); /* RGB565 */
#else
outpdw(overlay_base + 0x0014, curr | 0x0); /* RGB888 */
#endif
} else if (pipe->mixer_num == MDP4_MIXER2) {
if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
off = 0;
bpp = 1;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height *
pipe->src_width * bpp;
outpdw(overlay_base + 0x000c,
pipe->ov_blt_addr + off);
/* overlay output is RGB888 */
outpdw(overlay_base + 0x0010,
((pipe->src_width << 16) |
pipe->src_width));
outpdw(overlay_base + 0x001c,
pipe->ov_blt_addr + off);
off = pipe->src_height * pipe->src_width;
/* align chroma to 2k address */
off = (off + 2047) & ~2047;
/* UV plane address */
outpdw(overlay_base + 0x0020,
pipe->ov_blt_addr + off);
/* MDDI - BLT + on demand */
outpdw(overlay_base + 0x0004, 0x08);
/* pseudo planar + writeback */
curr = inpdw(overlay_base + 0x0014);
curr &= 0x4;
outpdw(overlay_base + 0x0014, curr | 0x012);
/* rgb->yuv */
outpdw(overlay_base + 0x0200, 0x05);
}
}
} else {
data = pipe->src_height;
data <<= 16;
data |= pipe->src_width;
outpdw(overlay_base + 0x0008, data); /* ROI, height + width */
outpdw(overlay_base + 0x000c, pipe->srcp0_addr);
outpdw(overlay_base + 0x0010, pipe->srcp0_ystride);
outpdw(overlay_base + 0x0004, 0x01); /* directout */
}
if (pipe->mixer_num == MDP4_MIXER1) {
if (intf == TV_INTF) {
curr = inpdw(overlay_base + 0x0014);
curr &= 0x4;
outpdw(overlay_base + 0x0014, 0x02); /* yuv422 */
/* overlay1 CSC config */
outpdw(overlay_base + 0x0200, 0x05); /* rgb->yuv */
}
}
#ifdef MDP4_IGC_LUT_ENABLE
curr = inpdw(overlay_base + 0x0014);
curr &= ~0x4;
outpdw(overlay_base + 0x0014, curr | 0x4); /* GC_LUT_EN, 888 */
#endif
if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
int mdp4_overlay_pipe_staged(struct mdp4_overlay_pipe *pipe)
{
uint32 data, mask;
int mixer;
mixer = pipe->mixer_num;
data = ctrl->mixer_cfg[mixer];
mask = 0x0f;
mask <<= (4 * pipe->pipe_num);
data &= mask;
return data;
}
int mdp4_mixer_info(int mixer_num, struct mdp_mixer_info *info)
{
int ndx, cnt;
struct mdp4_overlay_pipe *pipe;
if (mixer_num > MDP4_MIXER_MAX)
return -ENODEV;
cnt = 0;
ndx = MDP4_MIXER_STAGE_BASE;
for ( ; ndx < MDP4_MIXER_STAGE_MAX; ndx++) {
pipe = &ctrl->plist[ndx];
if (pipe == NULL)
continue;
if (!pipe->pipe_used)
continue;
info->z_order = pipe->mixer_stage - MDP4_MIXER_STAGE0;
/* z_order == -1, means base layer */
info->ptype = pipe->pipe_type;
info->pnum = pipe->pipe_num;
info->pndx = pipe->pipe_ndx;
info->mixer_num = pipe->mixer_num;
info++;
cnt++;
}
return cnt;
}
void mdp4_mixer_reset(int mixer)
{
uint32 data, data1, mask;
int i, ndx, min, max, bit;
mdp_clk_ctrl(1);
/* MDP_LAYERMIXER_IN_CFG, shared by both mixer 0 and 1 */
data = inpdw(MDP_BASE + 0x10100);
data1 = data;
if (mixer == 0) {
min = 1;
max = 8;
bit = 0x03; /* mixer0, dmap */
} else {
min = 9;
max = 0xf;
bit = 0x0C; /* mixer1, dmae */
}
mask = 0x0f;
for (i = 0 ; i < 8 ; i++) {
ndx = data & mask;
ndx >>= (i * 4);
if (ndx >= min && ndx <= max)
data1 &= ~mask; /* unstage pipe from mixer */
mask <<= 4;
}
pr_debug("%s: => MIXER_RESET, data1=%x data=%x bit=%x\n",
__func__, data1, data, bit);
/* unstage pipes of mixer to be reset */
outpdw(MDP_BASE + 0x10100, data1); /* MDP_LAYERMIXER_IN_CFG */
outpdw(MDP_BASE + 0x18000, 0);
mdp4_sw_reset(bit); /* reset mixer */ /* 0 => mixer0, dmap */
/* restore original stage */
outpdw(MDP_BASE + 0x10100, data); /* MDP_LAYERMIXER_IN_CFG */
outpdw(MDP_BASE + 0x18000, 0);
mdp4_vg_csc_restore();
mdp4_overlay_dmap_reconfig();
mdp_clk_ctrl(0);
}
void mdp4_mixer_stage_commit(int mixer)
{
struct mdp4_overlay_pipe *pipe;
int i, num;
u32 data, stage;
int off;
unsigned long flags;
data = 0;
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pipe = ctrl->stage[mixer][i];
if (pipe == NULL)
continue;
pr_debug("%s: mixer=%d ndx=%d stage=%d\n", __func__,
mixer, pipe->pipe_ndx, i);
stage = pipe->mixer_stage;
if (mixer >= MDP4_MIXER1)
stage += 8;
stage <<= (4 * pipe->pipe_num);
data |= stage;
}
/*
* stage_commit may be called from overlay_unset.
* For command panels, mdp clocks may be off at this time,
* so enabling the mdp clock here is necessary.
*/
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_clk_ctrl(1);
if (data)
mdp4_mixer_blend_setup(mixer);
off = 0;
if (data != ctrl->mixer_cfg[mixer]) {
ctrl->mixer_cfg[mixer] = data;
if (mixer >= MDP4_MIXER2) {
/* MDP_LAYERMIXER2_IN_CFG */
off = 0x100f0;
} else {
/* mixer 0 or 1 */
num = mixer + 1;
num &= 0x01;
data |= ctrl->mixer_cfg[num];
off = 0x10100;
}
pr_debug("%s: mixer=%d data=%x flush=%x pid=%d\n", __func__,
mixer, data, ctrl->flush[mixer], current->pid);
}
local_irq_save(flags);
if (off)
outpdw(MDP_BASE + off, data);
if (ctrl->flush[mixer]) {
outpdw(MDP_BASE + 0x18000, ctrl->flush[mixer]);
ctrl->flush[mixer] = 0;
}
local_irq_restore(flags);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mdp_clk_ctrl(0);
}
void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe, int commit)
{
struct mdp4_overlay_pipe *pp;
int i, mixer;
mixer = pipe->mixer_num;
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pp = ctrl->stage[mixer][i];
if (pp && pp->pipe_ndx == pipe->pipe_ndx) {
ctrl->stage[mixer][i] = NULL;
break;
}
}
ctrl->stage[mixer][pipe->mixer_stage] = pipe; /* keep it */
if (commit)
mdp4_mixer_stage_commit(mixer);
}
void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe, int commit)
{
struct mdp4_overlay_pipe *pp;
int i, mixer;
mixer = pipe->mixer_num;
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pp = ctrl->stage[mixer][i];
if (pp && pp->pipe_ndx == pipe->pipe_ndx)
ctrl->stage[mixer][i] = NULL; /* clear it */
}
if (commit)
mdp4_mixer_stage_commit(mixer);
}
/*
* mixer0: rgb3: border color at register 0x15004, 0x15008
* mixer1: vg3: border color at register 0x1D004, 0x1D008
* mixer2: xxx: border color at register 0x8D004, 0x8D008
*/
void mdp4_overlay_borderfill_stage_up(struct mdp4_overlay_pipe *pipe)
{
struct mdp4_overlay_pipe *bspipe;
int ptype, pnum, pndx, mixer;
int format, alpha_enable, alpha;
struct mdp4_iommu_pipe_info iom;
if (pipe->pipe_type != OVERLAY_TYPE_BF)
return;
mixer = pipe->mixer_num;
if (ctrl->baselayer[mixer])
return;
bspipe = ctrl->stage[mixer][MDP4_MIXER_STAGE_BASE];
if (bspipe == NULL) {
pr_err("%s: no base layer at mixer=%d\n",
__func__, mixer);
return;
}
/*
* bspipe is a clone here,
* get the real pipe
*/
bspipe = mdp4_overlay_ndx2pipe(bspipe->pipe_ndx);
if (bspipe == NULL) {
pr_err("%s: mdp4_overlay_ndx2pipe returned null pipe ndx\n",
__func__);
return;
}
/* save original base layer */
ctrl->baselayer[mixer] = bspipe;
iom = pipe->iommu;
pipe->alpha = 0; /* make sure bf pipe has alpha 0 */
ptype = pipe->pipe_type;
pnum = pipe->pipe_num;
pndx = pipe->pipe_ndx;
format = pipe->src_format;
alpha_enable = pipe->alpha_enable;
alpha = pipe->alpha;
*pipe = *bspipe; /* keep base layer configuration */
pipe->pipe_type = ptype;
pipe->pipe_num = pnum;
pipe->pipe_ndx = pndx;
pipe->src_format = format;
pipe->alpha_enable = alpha_enable;
pipe->alpha = alpha;
pipe->iommu = iom;
/* free the original base layer pipe to be used as a normal pipe */
bspipe->pipe_used = 0;
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_DTV)
mdp4_dtv_base_swap(0, pipe);
mdp4_overlay_reg_flush(bspipe, 1);
/* borderfill pipe as base layer */
mdp4_mixer_stage_up(pipe, 0);
}
void mdp4_overlay_borderfill_stage_down(struct mdp4_overlay_pipe *pipe)
{
struct mdp4_overlay_pipe *bspipe;
int ptype, pnum, pndx, mixer;
int format, alpha_enable, alpha;
struct mdp4_iommu_pipe_info iom;
if (pipe->pipe_type != OVERLAY_TYPE_BF)
return;
mixer = pipe->mixer_num;
/* retrieve original base layer */
bspipe = ctrl->baselayer[mixer];
if (bspipe == NULL) {
pr_err("%s: no base layer at mixer=%d\n",
__func__, mixer);
return;
}
iom = bspipe->iommu;
ptype = bspipe->pipe_type;
pnum = bspipe->pipe_num;
pndx = bspipe->pipe_ndx;
format = bspipe->src_format;
alpha_enable = bspipe->alpha_enable;
alpha = bspipe->alpha;
*bspipe = *pipe; /* restore base layer configuration */
bspipe->pipe_type = ptype;
bspipe->pipe_num = pnum;
bspipe->pipe_ndx = pndx;
bspipe->src_format = format;
bspipe->alpha_enable = alpha_enable;
bspipe->alpha = alpha;
bspipe->iommu = iom;
bspipe->pipe_used++; /* mark base layer pipe used */
ctrl->baselayer[mixer] = NULL;
/* free borderfill pipe */
pipe->pipe_used = 0;
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_base_swap(0, bspipe);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_base_swap(0, bspipe);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_base_swap(0, bspipe);
else if (ctrl->panel_mode & MDP4_PANEL_DTV)
mdp4_dtv_base_swap(0, bspipe);
/* free borderfill pipe */
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe, 0); /* commit will happen for bspipe up */
mdp4_overlay_pipe_free(pipe, 0);
/* stage up base layer */
mdp4_overlay_reg_flush(bspipe, 1);
/* restore original base layer */
mdp4_mixer_stage_up(bspipe, 1);
}
static struct mdp4_overlay_pipe *mdp4_background_layer(int mixer,
struct mdp4_overlay_pipe *sp)
{
struct mdp4_overlay_pipe *pp;
struct mdp4_overlay_pipe *kp;
int i;
kp = ctrl->stage[mixer][MDP4_MIXER_STAGE_BASE];
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pp = ctrl->stage[mixer][i];
if (pp == NULL)
continue;
if (pp == sp)
break;
if ((pp->dst_x <= sp->dst_x) &&
((pp->dst_x + pp->dst_w) >= (sp->dst_x + sp->dst_w))) {
if ((pp->dst_y <= sp->dst_y) &&
((pp->dst_y + pp->dst_h) >=
(sp->dst_y + sp->dst_h))) {
kp = pp;
}
}
}
return kp;
}
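/*
 * Example (added for clarity): with a full-screen base layer at the
 * base stage and a full-screen video layer at stage 0, calling
 * mdp4_background_layer() for an RGB layer at stage 1 returns the
 * video layer, i.e. the highest staged pipe below it whose dst
 * rectangle fully covers the given pipe.
 */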
static void mdp4_overlay_bg_solidfill(struct blend_cfg *blend)
{
struct mdp4_overlay_pipe *pipe;
char *base;
u32 op_mode, format;
int pnum, ptype;
pipe = blend->solidfill_pipe;
if (pipe == NULL)
return;
if (pipe->pipe_type == OVERLAY_TYPE_BF)
return;
ptype = mdp4_overlay_format2type(pipe->src_format);
if (ptype == OVERLAY_TYPE_RGB) {
pnum = pipe->pipe_num - OVERLAY_PIPE_RGB1;
base = MDP_BASE + MDP4_RGB_BASE;
base += MDP4_RGB_OFF * pnum;
} else {
pnum = pipe->pipe_num - OVERLAY_PIPE_VG1;
base = MDP_BASE + MDP4_VIDEO_BASE;
base += MDP4_VIDEO_OFF * pnum;
}
format = inpdw(base + 0x50);
if (blend->solidfill) {
format |= MDP4_FORMAT_SOLID_FILL;
/*
* If solid fill is enabled, flip and scale
* have to be disabled. Otherwise, the h/w
* underruns.
*/
op_mode = inpdw(base + 0x0058);
op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
outpdw(base + 0x0058, op_mode);
outpdw(base + 0x1008, 0); /* black */
/*
* Set src size and dst size the same to avoid underruns
*/
outpdw(base + 0x0000, inpdw(base + 0x0008));
} else {
u32 src_size = ((pipe->src_h << 16) | pipe->src_w);
outpdw(base + 0x0000, src_size);
format &= ~MDP4_FORMAT_SOLID_FILL;
blend->solidfill_pipe = NULL;
}
outpdw(base + 0x50, format);
mdp4_overlay_reg_flush(pipe, 0);
}
void mdp4_mixer_blend_cfg(int mixer)
{
int i, off;
unsigned char *overlay_base;
struct blend_cfg *blend;
if (mixer == MDP4_MIXER2)
overlay_base = MDP_BASE + MDP4_OVERLAYPROC2_BASE;
else if (mixer == MDP4_MIXER1)
overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;
else
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;
blend = &ctrl->blend[mixer][MDP4_MIXER_STAGE_BASE];
blend++; /* stage0 */
for (i = MDP4_MIXER_STAGE0; i < MDP4_MIXER_STAGE_MAX; i++) {
off = 0x20 * (i - MDP4_MIXER_STAGE0);
if (i == MDP4_MIXER_STAGE3)
off -= 4;
outpdw(overlay_base + off + 0x104, blend->op);
blend++;
}
}
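/*
* mdp4_set_blend_by_op: derive the mixer blend operation from the
* foreground pipe's user supplied blend_op (opaque, premultiplied or
* other), honouring the alpha_drop flag set when a scaled VG pipe loses
* its alpha channel.
*/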
static void mdp4_set_blend_by_op(struct mdp4_overlay_pipe *s_pipe,
struct mdp4_overlay_pipe *d_pipe,
int alpha_drop,
struct blend_cfg *blend)
{
int d_alpha, s_alpha;
u32 op;
d_alpha = d_pipe->alpha_enable;
s_alpha = s_pipe->alpha_enable;
/* based on fg's alpha */
blend->fg_alpha = s_pipe->alpha;
blend->bg_alpha = 0x0ff - s_pipe->alpha;
blend->op = MDP4_BLEND_FG_ALPHA_FG_CONST |
MDP4_BLEND_BG_ALPHA_BG_CONST;
blend->co3_sel = 1; /* use fg alpha */
op = s_pipe->blend_op;
if (op == BLEND_OP_OPAQUE) {
blend->bg_alpha = 0;
blend->fg_alpha = 0xff;
} else if ((op == BLEND_OP_PREMULTIPLIED) &&
(!alpha_drop) && s_alpha) {
blend->op = MDP4_BLEND_FG_ALPHA_FG_CONST |
MDP4_BLEND_BG_INV_ALPHA |
MDP4_BLEND_BG_ALPHA_FG_PIXEL;
if (blend->fg_alpha != 0xff) {
blend->bg_alpha = blend->fg_alpha;
blend->op |= MDP4_BLEND_BG_MOD_ALPHA;
}
} else if (!alpha_drop && s_alpha) {
blend->op = MDP4_BLEND_FG_ALPHA_FG_PIXEL |
MDP4_BLEND_BG_INV_ALPHA |
MDP4_BLEND_BG_ALPHA_FG_PIXEL;
if (blend->fg_alpha != 0xff) {
blend->bg_alpha = blend->fg_alpha;
blend->op |= MDP4_BLEND_FG_MOD_ALPHA |
MDP4_BLEND_BG_MOD_ALPHA;
}
}
if (!s_alpha && d_alpha)
blend->co3_sel = 0;
pr_debug("%s: op %d bg alpha %d, fg alpha %d blend: %x\n",
__func__, op, blend->bg_alpha, blend->fg_alpha, blend->op);
}
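/*
* mdp4_set_blend_by_fmt: fallback blend selection used when the request
* carries no valid blend_op; picks the blend operation from is_fg, the
* foreground/background alpha_enable flags and the alpha_drop flag.
*/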
static void mdp4_set_blend_by_fmt(struct mdp4_overlay_pipe *s_pipe,
struct mdp4_overlay_pipe *d_pipe,
int alpha_drop,
struct blend_cfg *blend)
{
int ptype, d_alpha, s_alpha;
d_alpha = d_pipe->alpha_enable;
s_alpha = s_pipe->alpha_enable;
/* based on fg's alpha */
blend->bg_alpha = 0x0ff - s_pipe->alpha;
blend->fg_alpha = s_pipe->alpha;
blend->co3_sel = 1; /* use fg alpha */
if (s_pipe->is_fg) {
if (s_pipe->alpha == 0xff) {
blend->solidfill = 1;
blend->solidfill_pipe = d_pipe;
}
} else if (s_alpha) {
if (!alpha_drop) {
blend->op = MDP4_BLEND_BG_ALPHA_FG_PIXEL;
if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
blend->op |=
MDP4_BLEND_FG_ALPHA_FG_PIXEL;
} else
blend->op = MDP4_BLEND_BG_ALPHA_FG_CONST;
blend->op |= MDP4_BLEND_BG_INV_ALPHA;
} else if (d_alpha) {
ptype = mdp4_overlay_format2type(s_pipe->src_format);
if (ptype == OVERLAY_TYPE_VIDEO &&
(!(s_pipe->flags & MDP_BACKEND_COMPOSITION))) {
blend->op = (MDP4_BLEND_FG_ALPHA_BG_PIXEL |
MDP4_BLEND_FG_INV_ALPHA);
if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
blend->op |=
MDP4_BLEND_BG_ALPHA_BG_PIXEL;
blend->co3_sel = 0; /* use bg alpha */
} else {
/* s_pipe is rgb without alpha */
blend->op = (MDP4_BLEND_FG_ALPHA_FG_CONST |
MDP4_BLEND_BG_ALPHA_BG_CONST);
blend->bg_alpha = 0;
}
}
}
/*
* D(i+1) = Ks * S + Kd * D(i)
*/
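/*
* S is the pixel of the stage being blended (fg) and D(i) the result of
* the stages below it (bg); Ks/Kd are the fg/bg factors picked by
* mdp4_set_blend_by_op()/mdp4_set_blend_by_fmt() and programmed below.
*/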
void mdp4_mixer_blend_setup(int mixer)
{
struct mdp4_overlay_pipe *d_pipe;
struct mdp4_overlay_pipe *s_pipe;
struct blend_cfg *blend;
int i, off, alpha_drop;
unsigned char *overlay_base;
uint32 c0, c1, c2;
d_pipe = ctrl->stage[mixer][MDP4_MIXER_STAGE_BASE];
if (d_pipe == NULL) {
pr_err("%s: Error: no bg_pipe at mixer=%d\n", __func__, mixer);
return;
}
blend = &ctrl->blend[mixer][MDP4_MIXER_STAGE0];
for (i = MDP4_MIXER_STAGE0; i < MDP4_MIXER_STAGE_MAX; i++) {
blend->solidfill = 0;
blend->op = (MDP4_BLEND_FG_ALPHA_FG_CONST |
MDP4_BLEND_BG_ALPHA_BG_CONST);
s_pipe = ctrl->stage[mixer][i];
if (s_pipe == NULL) {
blend++;
d_pipe = NULL;
continue;
}
alpha_drop = 0; /* per stage */
/* alpha channel is lost on VG pipe when using QSEED or M/N */
if (s_pipe->pipe_type == OVERLAY_TYPE_VIDEO &&
s_pipe->alpha_enable &&
((s_pipe->op_mode & MDP4_OP_SCALEY_EN) ||
(s_pipe->op_mode & MDP4_OP_SCALEX_EN)) &&
!(s_pipe->op_mode & (MDP4_OP_SCALEX_PIXEL_RPT |
MDP4_OP_SCALEY_PIXEL_RPT)))
alpha_drop = 1;
d_pipe = mdp4_background_layer(mixer, s_pipe);
pr_debug("%s: stage=%d: bg: ndx=%d da=%d dalpha=%x "
"fg: ndx=%d sa=%d salpha=%x is_fg=%d alpha_drop=%d\n",
__func__, i-2, d_pipe->pipe_ndx, d_pipe->alpha_enable,
d_pipe->alpha, s_pipe->pipe_ndx, s_pipe->alpha_enable,
s_pipe->alpha, s_pipe->is_fg, alpha_drop);
if ((s_pipe->blend_op == BLEND_OP_NOT_DEFINED) ||
(s_pipe->blend_op >= BLEND_OP_MAX))
mdp4_set_blend_by_fmt(s_pipe, d_pipe,
alpha_drop, blend);
else
mdp4_set_blend_by_op(s_pipe, d_pipe, alpha_drop, blend);
if (s_pipe->transp != MDP_TRANSP_NOP) {
if (s_pipe->is_fg) {
transp_color_key(s_pipe->src_format,
s_pipe->transp, &c0, &c1, &c2);
/* Fg blocked */
blend->op |= MDP4_BLEND_FG_TRANSP_EN;
/* lower limit */
blend->transp_low0 = (c1 << 16 | c0);
blend->transp_low1 = c2;
/* upper limit */
blend->transp_high0 = (c1 << 16 | c0);
blend->transp_high1 = c2;
} else {
transp_color_key(d_pipe->src_format,
s_pipe->transp, &c0, &c1, &c2);
/* Bg blocked */
blend->op |= MDP4_BLEND_BG_TRANSP_EN;
blend--; /* one stage back */
/* lower limit */
blend->transp_low0 = (c1 << 16 | c0);
blend->transp_low1 = c2;
/* upper limit */
blend->transp_high0 = (c1 << 16 | c0);
blend->transp_high1 = c2;
blend++; /* back to original stage */
}
}
blend++;
}
/* mixer number: 0 -> /dev/fb0, 1 -> /dev/fb1, 2 -> /dev/fb2 */
if (mixer == MDP4_MIXER2)
overlay_base = MDP_BASE + MDP4_OVERLAYPROC2_BASE;/* 0x88000 */
else if (mixer == MDP4_MIXER1)
overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
else
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
blend = &ctrl->blend[mixer][MDP4_MIXER_STAGE_BASE];
/* lower limit */
outpdw(overlay_base + 0x180, blend->transp_low0);
outpdw(overlay_base + 0x184, blend->transp_low1);
/* upper limit */
outpdw(overlay_base + 0x188, blend->transp_high0);
outpdw(overlay_base + 0x18c, blend->transp_high1);
blend++; /* stage0 */
for (i = MDP4_MIXER_STAGE0; i < MDP4_MIXER_STAGE_MAX; i++) {
off = 0x20 * (i - MDP4_MIXER_STAGE0);
if (i == MDP4_MIXER_STAGE3)
off -= 4;
if (blend->solidfill_pipe)
mdp4_overlay_bg_solidfill(blend);
outpdw(overlay_base + off + 0x108, blend->fg_alpha);
outpdw(overlay_base + off + 0x10c, blend->bg_alpha);
if (mdp_rev >= MDP_REV_42)
outpdw(overlay_base + off + 0x104, blend->op);
outpdw(overlay_base + (off << 5) + 0x1004, blend->co3_sel);
outpdw(overlay_base + off + 0x110, blend->transp_low0);/* low */
outpdw(overlay_base + off + 0x114, blend->transp_low1);/* low */
/* upper limit */
outpdw(overlay_base + off + 0x118, blend->transp_high0);
outpdw(overlay_base + off + 0x11c, blend->transp_high1);
blend++;
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
void mdp4_overlay_reg_flush(struct mdp4_overlay_pipe *pipe, int all)
{
int mixer;
uint32 *reg;
mixer = pipe->mixer_num;
reg = &ctrl->flush[mixer];
*reg |= (1 << (2 + pipe->pipe_num));
if (all) {
if (mixer == MDP4_MIXER0)
*reg |= 0x01;
else
*reg |= 0x02;
}
}
void mdp4_overlay_flush_piggyback(int m0, int m1)
{
u32 data;
data = ctrl->flush[m0] | ctrl->flush[m1];
ctrl->flush[m0] = data;
}
void mdp4_overlay_reg_flush_reset(struct mdp4_overlay_pipe *pipe)
{
int mixer;
mixer = pipe->mixer_num;
ctrl->flush[mixer] = 0;
}
struct mdp4_overlay_pipe *mdp4_overlay_stage_pipe(int mixer, int stage)
{
return ctrl->stage[mixer][stage];
}
struct mdp4_overlay_pipe *mdp4_overlay_ndx2pipe(int ndx)
{
struct mdp4_overlay_pipe *pipe;
if (ndx <= 0 || ndx > OVERLAY_PIPE_MAX)
return NULL;
pipe = &ctrl->plist[ndx - 1]; /* ndx starts from 1 */
if (pipe->pipe_used == 0)
return NULL;
return pipe;
}
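/*
* mdp4_overlay_pipe_alloc: find a free pipe of the requested type (an RGB
* request may also be served by an unused VG pipe). A borderfill request
* must match the mixer and stages down the current borderfill pipe before
* reuse. Returns NULL when no pipe is available.
*/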
struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer)
{
int i;
struct mdp4_overlay_pipe *pipe;
if (ptype == OVERLAY_TYPE_BF) {
if (!mdp4_overlay_borderfill_supported())
return NULL;
}
for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
pipe = &ctrl->plist[i];
if (pipe->pipe_type == ptype ||
(ptype == OVERLAY_TYPE_RGB && pipe->pipe_type == OVERLAY_TYPE_VIDEO)) {
if ((ptype == OVERLAY_TYPE_BF && mixer != pipe->mixer_num) ||
(ptype != OVERLAY_TYPE_BF && pipe->pipe_used != 0)) {
continue;
} else if (ptype == OVERLAY_TYPE_BF) { /* borderfill pipe */
mdp4_overlay_borderfill_stage_down(pipe);
}
init_completion(&pipe->comp);
init_completion(&pipe->dmas_comp);
pr_debug("%s: pipe=%x ndx=%d num=%d\n", __func__,
(int)pipe, pipe->pipe_ndx, pipe->pipe_num);
return pipe;
}
}
pr_err("%s: ptype=%d FAILED\n", __func__, ptype);
return NULL;
}
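/*
* mdp4_overlay_pipe_free: release a pipe. iommu buffers are freed for
* everything but borderfill pipes, the pipe struct is cleared except for
* its identity fields (type, num, ndx, mixer, iommu bookkeeping), and the
* real pipe entry (when this is a copy) is marked unused as well.
*/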
void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe, int all)
{
uint32 ptype, num, ndx, mixer;
struct mdp4_iommu_pipe_info iom;
struct mdp4_overlay_pipe *orgpipe;
pr_debug("%s: pipe=%x ndx=%d\n", __func__, (int)pipe, pipe->pipe_ndx);
ptype = pipe->pipe_type;
num = pipe->pipe_num;
ndx = pipe->pipe_ndx;
mixer = pipe->mixer_num;
/* No need for borderfill pipe */
if (pipe->pipe_type != OVERLAY_TYPE_BF)
mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, all);
iom = pipe->iommu;
memset(pipe, 0, sizeof(*pipe));
pipe->pipe_type = ptype;
pipe->pipe_num = num;
pipe->pipe_ndx = ndx;
pipe->mixer_num = mixer;
pipe->iommu = iom;
/*Clear real pipe attributes as well */
orgpipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
if (orgpipe != NULL)
orgpipe->pipe_used = 0;
}
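/*
* mdp4_overlay_req2pipe: validate an overlay set request (z-order,
* src/dst rectangles, scaling limits, format) and translate it into an
* overlay pipe, allocating a new pipe for MSMFB_NEW_REQUEST or reusing
* the pipe referenced by req->id.
*/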
static int mdp4_overlay_req2pipe(struct mdp_overlay *req, int mixer,
struct mdp4_overlay_pipe **ppipe,
struct msm_fb_data_type *mfd)
{
struct mdp4_overlay_pipe *pipe;
int ret, ptype;
u32 upscale_max;
upscale_max = (mdp_rev >= MDP_REV_41) ?
MDP4_REV41_OR_LATER_UP_SCALING_MAX :
MDP4_REV40_UP_SCALING_MAX;
if (mfd == NULL) {
pr_err("%s: mfd == NULL, -ENODEV\n", __func__);
return -ENODEV;
}
if (mixer >= MDP4_MIXER_MAX) {
pr_err("%s: mixer out of range!\n", __func__);
mdp4_stat.err_mixer++;
return -ERANGE;
}
if (req->z_order < 0 || req->z_order > 3) {
pr_err("%s: z_order=%d out of range!\n", __func__,
req->z_order);
mdp4_stat.err_zorder++;
return -ERANGE;
}
if (req->src_rect.h > 0xFFF || req->src_rect.h < 2) {
pr_err("%s: src_h is out of range: 0X%x!\n",
__func__, req->src_rect.h);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->src_rect.w > 0xFFF || req->src_rect.w < 2) {
pr_err("%s: src_w is out of range: 0X%x!\n",
__func__, req->src_rect.w);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->src_rect.x > 0xFFF) {
pr_err("%s: src_x is out of range: 0X%x!\n",
__func__, req->src_rect.x);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->src_rect.y > 0xFFF) {
pr_err("%s: src_y is out of range: 0X%x!\n",
__func__, req->src_rect.y);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->dst_rect.h > 0xFFF || req->dst_rect.h < 2) {
pr_err("%s: dst_h is out of range: 0X%x!\n",
__func__, req->dst_rect.h);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->dst_rect.w > 0xFFF || req->dst_rect.w < 2) {
pr_err("%s: dst_w is out of range: 0X%x!\n",
__func__, req->dst_rect.w);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->dst_rect.x > 0xFFF) {
pr_err("%s: dst_x is out of range: 0X%x!\n",
__func__, req->dst_rect.x);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->dst_rect.y > 0xFFF) {
pr_err("%s: dst_y is out of range: 0X%x!\n",
__func__, req->dst_rect.y);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->src_rect.h == 0 || req->src_rect.w == 0) {
pr_err("%s: src img of zero size!\n", __func__);
mdp4_stat.err_size++;
return -EINVAL;
}
if (req->dst_rect.h > (req->src_rect.h * upscale_max)) {
mdp4_stat.err_scale++;
pr_err("%s: scale up, too much (h)!\n", __func__);
return -ERANGE;
}
if (req->src_rect.h > (req->dst_rect.h * 8)) { /* too little */
mdp4_stat.err_scale++;
pr_err("%s: scale down, too little (h)!\n", __func__);
return -ERANGE;
}
if (req->dst_rect.w > (req->src_rect.w * upscale_max)) {
mdp4_stat.err_scale++;
pr_err("%s: scale up, too much (w)!\n", __func__);
return -ERANGE;
}
if (req->src_rect.w > (req->dst_rect.w * 8)) { /* too little */
mdp4_stat.err_scale++;
pr_err("%s: scale down, too little (w)!\n", __func__);
return -ERANGE;
}
if (mdp_hw_revision == MDP4_REVISION_V1) {
/* non-integer down scaling ratios smaller than 1/4
* are not supported
*/
if (req->src_rect.h > (req->dst_rect.h * 4)) {
if (req->src_rect.h % req->dst_rect.h) {
mdp4_stat.err_scale++;
pr_err("%s: need integer (h)!\n", __func__);
return -ERANGE;
}
}
if (req->src_rect.w > (req->dst_rect.w * 4)) {
if (req->src_rect.w % req->dst_rect.w) {
mdp4_stat.err_scale++;
pr_err("%s: need integer (w)!\n", __func__);
return -ERANGE;
}
}
}
if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
((req->src_rect.y + req->src_rect.h) > req->src.height)) {
mdp4_stat.err_size++;
pr_err("%s invalid src rectangle\n", __func__);
return -ERANGE;
}
if (ctrl->panel_3d != MDP4_3D_SIDE_BY_SIDE) {
int xres;
int yres;
xres = mfd->panel_info.xres;
yres = mfd->panel_info.yres;
if (((req->dst_rect.x + req->dst_rect.w) > xres) ||
((req->dst_rect.y + req->dst_rect.h) > yres)) {
mdp4_stat.err_size++;
pr_err("%s invalid dst rectangle (%dx%d) vs (%dx%d)\n", __func__,(req->dst_rect.x + req->dst_rect.w),(req->dst_rect.y + req->dst_rect.h),xres,yres);
return -ERANGE;
}
}
ptype = mdp4_overlay_format2type(req->src.format);
if (ptype < 0) {
pr_err("%s: mdp4_overlay_format2type!\n", __func__);
return ptype;
}
if (req->flags & MDP_OV_PIPE_SHARE)
ptype = OVERLAY_TYPE_VIDEO; /* VG pipe supports both RGB+YUV */
if (req->id == MSMFB_NEW_REQUEST) /* new request */
pipe = mdp4_overlay_pipe_alloc(ptype, mixer);
else
pipe = mdp4_overlay_ndx2pipe(req->id);
if (pipe == NULL) {
pr_err("%s: pipe == NULL!\n", __func__);
return -ENOMEM;
}
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
pipe->mfd = mfd;
#endif
if (!display_iclient && !IS_ERR_OR_NULL(mfd->iclient)) {
display_iclient = mfd->iclient;
pr_debug("%s(): display_iclient %p\n", __func__,
display_iclient);
}
pipe->src_format = req->src.format;
ret = mdp4_overlay_format2pipe(pipe);
if (ret < 0) {
pr_err("%s: mdp4_overlay_format2pipe!\n", __func__);
return ret;
}
/*
* base layer == 1, reserved for frame buffer
* zorder 0 == stage 0 == 2
* zorder 1 == stage 1 == 3
* zorder 2 == stage 2 == 4
*/
if (req->id == MSMFB_NEW_REQUEST) { /* new request */
if (mdp4_overlay_pipe_staged(pipe)) {
pr_err("%s: ndx=%d still staged\n", __func__,
pipe->pipe_ndx);
return -EPERM;
}
pipe->pipe_used++;
pipe->mixer_num = mixer;
pr_debug("%s: zorder=%d pipe ndx=%d num=%d\n", __func__,
req->z_order, pipe->pipe_ndx, pipe->pipe_num);
}
pipe->mixer_stage = req->z_order + MDP4_MIXER_STAGE0;
pipe->src_width = req->src.width & 0x1fff; /* source img width */
pipe->src_height = req->src.height & 0x1fff; /* source img height */
pipe->src_h = req->src_rect.h & 0x07ff;
pipe->src_w = req->src_rect.w & 0x07ff;
pipe->src_y = req->src_rect.y & 0x07ff;
pipe->src_x = req->src_rect.x & 0x07ff;
pipe->dst_h = req->dst_rect.h & 0x07ff;
pipe->dst_w = req->dst_rect.w & 0x07ff;
pipe->dst_y = req->dst_rect.y & 0x07ff;
pipe->dst_x = req->dst_rect.x & 0x07ff;
pipe->op_mode = 0;
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
pipe->ext_flag = req->flags;
#endif
if (req->flags & MDP_FLIP_LR)
pipe->op_mode |= MDP4_OP_FLIP_LR;
if (req->flags & MDP_FLIP_UD)
pipe->op_mode |= MDP4_OP_FLIP_UD;
if (req->flags & MDP_DITHER)
pipe->op_mode |= MDP4_OP_DITHER_EN;
if (req->flags & MDP_DEINTERLACE)
pipe->op_mode |= MDP4_OP_DEINT_EN;
if (req->flags & MDP_DEINTERLACE_ODD)
pipe->op_mode |= MDP4_OP_DEINT_ODD_REF;
pipe->is_fg = req->is_fg;/* control alpha and color key */
pipe->alpha = req->alpha & 0x0ff;
pipe->blend_op = req->blend_op;
pipe->transp = req->transp_mask;
pipe->flags = req->flags;
*ppipe = pipe;
return 0;
}
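/*
* mdp4_calc_pipe_mdp_clk: estimate the mdp core clock a pipe needs from
* the panel pixel clock and the horizontal/vertical scale factors, with
* extra margin for scaling, low v_back_porch panels and deinterlacing.
* The result is stored in pipe->req_clk.
*/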
static int mdp4_calc_pipe_mdp_clk(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
u32 pclk;
u32 xscale, yscale;
u32 hsync = 0;
u32 shift = 16;
u64 rst;
int ptype;
int ret = -EINVAL;
if (!pipe) {
pr_err("%s: pipe is null!\n", __func__);
return ret;
}
if (!mfd) {
pr_err("%s: mfd is null!\n", __func__);
return ret;
}
pr_debug("%s: pipe sets: panel res(x,y)=(%d,%d)\n",
__func__, mfd->panel_info.xres, mfd->panel_info.yres);
pr_debug("%s: src(w,h)(%d,%d),src(x,y)(%d,%d)\n",
__func__, pipe->src_w, pipe->src_h, pipe->src_x, pipe->src_y);
pr_debug("%s: dst(w,h)(%d,%d),dst(x,y)(%d,%d)\n",
__func__, pipe->dst_w, pipe->dst_h, pipe->dst_x, pipe->dst_y);
pclk = (mfd->panel_info.type == MIPI_VIDEO_PANEL ||
mfd->panel_info.type == MIPI_CMD_PANEL) ?
mfd->panel_info.mipi.dsi_pclk_rate :
mfd->panel_info.clk_rate;
if (mfd->panel_info.type == LVDS_PANEL &&
mfd->panel_info.lvds.channel_mode == LVDS_DUAL_CHANNEL_MODE)
pclk = pclk << 1;
if (!pclk) {
pipe->req_clk = mdp_max_clk;
pr_err("%s panel pixel clk is zero!\n", __func__);
return ret;
}
pr_debug("%s: mdp panel pixel clk is %d.\n",
__func__, pclk);
if (!pipe->dst_h) {
pr_err("%s: pipe dst_h is zero!\n", __func__);
pipe->req_clk = mdp_max_clk;
return ret;
}
if (!pipe->src_h) {
pr_err("%s: pipe src_h is zero!\n", __func__);
pipe->req_clk = mdp_max_clk;
return ret;
}
if (!pipe->dst_w) {
pr_err("%s: pipe dst_w is zero!\n", __func__);
pipe->req_clk = mdp_max_clk;
return ret;
}
if (!pipe->dst_h) {
pr_err("%s: pipe dst_h is zero!\n", __func__);
pipe->req_clk = mdp_max_clk;
return ret;
}
if (pipe->mixer_num == MDP4_MIXER0) {
if (pipe->blt_forced)
return 0;
ptype = mdp4_overlay_format2type(pipe->src_format);
if (ptype == OVERLAY_TYPE_VIDEO) {
if ((pipe->src_h >= 720) && (pipe->src_w >= 1080)) {
pipe->req_clk = (u32) mdp_max_clk + 100;
pipe->blt_forced++;
return 0;
} else if ((pipe->src_h >= 1080) && (pipe->src_w >= 720)) {
pipe->req_clk = (u32) mdp_max_clk + 100;
pipe->blt_forced++;
return 0;
}
}
}
/*
* For the scaling cases, make more margin by removing porch
* values and adding extra 20%.
*/
if ((pipe->src_h != pipe->dst_h) ||
(pipe->src_w != pipe->dst_w)) {
hsync = mfd->panel_info.xres;
hsync *= 100;
hsync /= 120;
pr_debug("%s: panel hsync is %d. with scaling\n",
__func__, hsync);
} else {
hsync = mfd->panel_info.lcdc.h_back_porch +
mfd->panel_info.lcdc.h_front_porch +
mfd->panel_info.lcdc.h_pulse_width +
mfd->panel_info.xres;
pr_debug("%s: panel hsync is %d.\n",
__func__, hsync);
}
if (!hsync) {
pipe->req_clk = mdp_max_clk;
pr_err("%s: panel hsync is zero!\n", __func__);
return 0;
}
xscale = mfd->panel_info.xres;
xscale += pipe->src_w;
if (xscale < pipe->dst_w) {
pipe->req_clk = mdp_max_clk;
pr_err("%s: xres+src_w cannot be less than dst_w!\n",
__func__);
return ret;
}
xscale -= pipe->dst_w;
xscale <<= shift;
xscale /= hsync;
pr_debug("%s: the right %d shifted xscale is %d.\n",
__func__, shift, xscale);
if (pipe->src_h > pipe->dst_h) {
yscale = pipe->src_h;
yscale <<= shift;
yscale /= pipe->dst_h;
} else { /* upscale */
yscale = pipe->dst_h;
yscale <<= shift;
yscale /= pipe->src_h;
}
yscale *= pipe->src_w;
yscale /= hsync;
pr_debug("%s: the right %d shifted yscale is %d.\n",
__func__, shift, yscale);
rst = pclk;
if (yscale > xscale)
rst *= yscale;
else
rst *= xscale;
rst >>= shift;
/*
* There is one special case for panels that have a low
* v_back_porch (<=4): the mdp clk should be fast enough to
* buffer 4 input lines during back porch time if scaling is
* required (FIR).
*/
if ((mfd->panel_info.lcdc.v_back_porch <= 4) &&
(pipe->src_h != pipe->dst_h) &&
(mfd->panel_info.lcdc.v_back_porch)) {
u32 clk = 0;
clk = 4 * (pclk >> shift) / mfd->panel_info.lcdc.v_back_porch;
clk <<= shift;
pr_debug("%s: mdp clk rate %d based on low vbp %d\n",
__func__, clk, mfd->panel_info.lcdc.v_back_porch);
rst = (rst > clk) ? rst : clk;
}
/*
* If the calculated mdp clk is less than panel pixel clk,
* most likely due to upscaling, mdp clk rate will be set to
* greater than pclk. Now the driver uses 1.15 as the
* factor. Ideally this factor is passed from board file.
*/
if (rst < pclk) {
rst = ((pclk >> shift) * 23 / 20) << shift;
pr_debug("%s calculated mdp clk is less than pclk.\n",
__func__);
}
/*
* Interlaced videos require the max mdp clk; this is not
* captured by the mdp clk equation above.
*/
if (pipe->flags & MDP_DEINTERLACE) {
rst = (rst > mdp_max_clk) ? rst : mdp_max_clk;
pr_info("%s deinterlace requires max mdp clk.\n",
__func__);
}
pipe->req_clk = (u32) rst;
pr_debug("%s: required mdp clk %d mixer %d pipe ndx %d\n",
__func__, pipe->req_clk, pipe->mixer_num, pipe->pipe_ndx);
return 0;
}
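/*
* mdp4_calc_pipe_mdp_bw: compute the ab/ib bus bandwidth quota of a pipe
* from its source size, the panel frame rate and bytes per pixel, scaled
* by the global mdp_bw_ab_factor/mdp_bw_ib_factor percentages.
*/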
static int mdp4_calc_pipe_mdp_bw(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
u32 fps;
int ret = -EINVAL;
u32 quota;
u32 shift = 16;
if (!pipe) {
pr_err("%s: pipe is null!\n", __func__);
return ret;
}
if (!mfd) {
pr_err("%s: mfd is null!\n", __func__);
return ret;
}
fps = mdp_get_panel_framerate(mfd);
quota = pipe->src_w * pipe->src_h * fps * pipe->bpp;
quota >>= shift;
pipe->bw_ab_quota = quota * mdp_bw_ab_factor / 100;
pipe->bw_ib_quota = quota * mdp_bw_ib_factor / 100;
pr_debug("%s max_bw=%llu ab_factor=%d ib_factor=%d\n", __func__,
mdp_max_bw, mdp_bw_ab_factor, mdp_bw_ib_factor);
/* down scaling factor for ib */
if ((pipe->dst_h) && (pipe->src_h) &&
(pipe->src_h > pipe->dst_h)) {
u64 ib = quota;
ib *= pipe->src_h;
ib /= pipe->dst_h;
pipe->bw_ib_quota = max(ib, pipe->bw_ib_quota);
pr_debug("%s: src_h=%d dst_h=%d mdp ib %llu, ib_quota=%llu\n",
__func__, pipe->src_h, pipe->dst_h,
ib<<shift, pipe->bw_ib_quota<<shift);
}
pipe->bw_ab_quota <<= shift;
pipe->bw_ib_quota <<= shift;
pr_debug("%s: pipe ndx=%d src(h,w)(%d, %d) fps=%d bpp=%d\n",
__func__, pipe->pipe_ndx, pipe->src_h, pipe->src_w,
fps, pipe->bpp);
pr_debug("%s: ab_quota=%llu ib_quota=%llu\n", __func__,
pipe->bw_ab_quota, pipe->bw_ib_quota);
return 0;
}
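/*
* mdp4_calc_blt_mdp_bw: compute the extra ab/ib bandwidth needed by the
* writeback (blt) buffer of a mixer; the quota covers both the read and
* the write of the blt buffer at the destination size.
*/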
int mdp4_calc_blt_mdp_bw(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
struct mdp4_overlay_perf *perf_req = &perf_request;
u32 fps;
int bpp;
int ret = -EINVAL;
u32 quota;
u32 shift = 16;
if (!pipe) {
pr_err("%s: pipe is null!\n", __func__);
return ret;
}
if (!mfd) {
pr_err("%s: mfd is null!\n", __func__);
return ret;
}
mutex_lock(&perf_mutex);
bpp = BLT_BPP;
fps = mdp_get_panel_framerate(mfd);
/* read and write bw*/
quota = pipe->dst_w * pipe->dst_h * fps * bpp * 2;
quota >>= shift;
perf_req->mdp_ov_ab_bw[pipe->mixer_num] =
quota * mdp_bw_ab_factor / 100;
perf_req->mdp_ov_ib_bw[pipe->mixer_num] =
quota * mdp_bw_ib_factor / 100;
perf_req->mdp_ov_ab_bw[pipe->mixer_num] <<= shift;
perf_req->mdp_ov_ib_bw[pipe->mixer_num] <<= shift;
pr_debug("%s: pipe ndx=%d dst(h,w)(%d, %d) fps=%d bpp=%d\n",
__func__, pipe->pipe_ndx, pipe->dst_h, pipe->dst_w,
fps, bpp);
pr_debug("%s: overlay=%d ab_bw=%llu ib_bw=%llu\n", __func__,
pipe->mixer_num,
perf_req->mdp_ov_ab_bw[pipe->mixer_num],
perf_req->mdp_ov_ib_bw[pipe->mixer_num]);
mutex_unlock(&perf_mutex);
return 0;
}
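/*
* mdp4_overlay_mdp_perf_req: aggregate the per-pipe clock and bandwidth
* requests of every used pipe into perf_request and decide, per mixer,
* whether writeback (blt) mode is needed (req_clk above mdp_max_clk,
* mixer2, or the dsi cmd mode / wide panel workarounds).
*/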
int mdp4_overlay_mdp_perf_req(struct msm_fb_data_type *mfd)
{
u32 worst_mdp_clk = 0;
int i;
struct mdp4_overlay_perf *perf_req = &perf_request;
struct mdp4_overlay_pipe *pipe;
u32 cnt = 0;
int ret = -EINVAL;
u64 ab_quota_total = 0, ib_quota_total = 0;
if (!mfd) {
pr_err("%s: mfd is null!\n", __func__);
return ret;
}
mutex_lock(&perf_mutex);
pipe = ctrl->plist;
for (i = 0; i < MDP4_MIXER_MAX; i++)
perf_req->use_ov_blt[i] = 0;
for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
if (!pipe) {
mutex_unlock(&perf_mutex);
return ret;
}
if (!pipe->pipe_used)
continue;
cnt++;
if (worst_mdp_clk < pipe->req_clk)
worst_mdp_clk = pipe->req_clk;
if (pipe->req_clk > mdp_max_clk)
perf_req->use_ov_blt[pipe->mixer_num] = 1;
if (pipe->mixer_num == MDP4_MIXER2)
perf_req->use_ov_blt[MDP4_MIXER2] = 1;
if (pipe->pipe_type != OVERLAY_TYPE_BF) {
ab_quota_total += pipe->bw_ab_quota;
ib_quota_total += pipe->bw_ib_quota;
}
if (mfd->mdp_rev == MDP_REV_41) {
/*
* use writeback (blt) mode to work around a dsi cmd
* mode interface hardware bug.
*/
if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
if (pipe->dst_x != 0)
perf_req->use_ov_blt[MDP4_MIXER0] = 1;
}
if ((mfd->panel_info.xres > 1280) &&
(mfd->panel_info.type != DTV_PANEL)) {
perf_req->use_ov_blt[MDP4_MIXER0] = 1;
}
}
}
perf_req->mdp_clk_rate = min(worst_mdp_clk, mdp_max_clk);
perf_req->mdp_clk_rate = mdp_clk_round_rate(perf_req->mdp_clk_rate);
for (i = 0; i < MDP4_MIXER_MAX; i++) {
if (perf_req->use_ov_blt[i]) {
ab_quota_total += perf_req->mdp_ov_ab_bw[i];
ib_quota_total += perf_req->mdp_ov_ib_bw[i];
}
}
perf_req->mdp_ab_bw = roundup(ab_quota_total, MDP_BUS_SCALE_AB_STEP);
perf_req->mdp_ib_bw = roundup(ib_quota_total, MDP_BUS_SCALE_AB_STEP);
pr_debug("%s %d: ab_quota_total=(%llu, %d) ib_quota_total=(%llu, %d)\n",
__func__, __LINE__,
ab_quota_total, perf_req->mdp_ab_bw,
ib_quota_total, perf_req->mdp_ib_bw);
if (ab_quota_total > mdp_max_bw)
pr_warn("%s: req ab bw=%llu is larger than max bw=%llu",
__func__, ab_quota_total, mdp_max_bw);
if (ib_quota_total > mdp_max_bw)
pr_warn("%s: req ib bw=%llu is larger than max bw=%llu",
__func__, ib_quota_total, mdp_max_bw);
pr_debug("%s %d: pid %d cnt %d clk %d ov0_blt %d, ov1_blt %d\n",
__func__, __LINE__, current->pid, cnt,
perf_req->mdp_clk_rate,
perf_req->use_ov_blt[0],
perf_req->use_ov_blt[1]);
mutex_unlock(&perf_mutex);
return 0;
}
int mdp4_overlay_mdp_pipe_req(struct mdp4_overlay_pipe *pipe,
struct msm_fb_data_type *mfd)
{
int ret = 0;
if (mdp4_calc_pipe_mdp_clk(mfd, pipe)) {
ret = -EINVAL;
pr_err("%s: unable to calc mdp pipe clk rate, ret=%d\n",
__func__, ret);
}
if (mdp4_calc_pipe_mdp_bw(mfd, pipe)) {
ret = -EINVAL;
pr_err("%s: unable to calc mdp pipe bandwidth, ret=%d\n",
__func__, ret);
}
return ret;
}
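/*
* mdp4_overlay_mdp_perf_upd: apply perf_request to the hardware and track
* it in perf_current. With flag set only increases are applied (higher
* mdp clk, higher bus bandwidth, blt start); with flag cleared only
* decreases are applied (lower mdp clk, lower bandwidth, blt stop).
*/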
void mdp4_overlay_mdp_perf_upd(struct msm_fb_data_type *mfd,
int flag)
{
struct mdp4_overlay_perf *perf_req = &perf_request;
struct mdp4_overlay_perf *perf_cur = &perf_current;
pr_debug("%s %d: req mdp clk %d, cur mdp clk %d flag %d\n",
__func__, __LINE__,
perf_req->mdp_clk_rate,
perf_cur->mdp_clk_rate,
flag);
mutex_lock(&perf_mutex);
if (!mdp4_extn_disp)
perf_cur->use_ov_blt[1] = 0;
if (flag) {
if (perf_req->mdp_clk_rate > perf_cur->mdp_clk_rate) {
mdp_set_core_clk(perf_req->mdp_clk_rate);
pr_info("%s mdp clk is changed [%d] from %d to %d\n",
__func__,
flag,
perf_cur->mdp_clk_rate,
perf_req->mdp_clk_rate);
perf_cur->mdp_clk_rate =
perf_req->mdp_clk_rate;
}
if ((perf_req->mdp_ab_bw > perf_cur->mdp_ab_bw) ||
(perf_req->mdp_ib_bw > perf_cur->mdp_ib_bw)) {
mdp_bus_scale_update_request
(perf_req->mdp_ab_bw, perf_req->mdp_ib_bw);
pr_debug("%s mdp ab_bw is changed [%d] from %d to %d\n",
__func__,
flag,
perf_cur->mdp_ab_bw,
perf_req->mdp_ab_bw);
pr_debug("%s mdp ib_bw is changed [%d] from %d to %d\n",
__func__,
flag,
perf_cur->mdp_ib_bw,
perf_req->mdp_ib_bw);
perf_cur->mdp_ab_bw = perf_req->mdp_ab_bw;
perf_cur->mdp_ib_bw = perf_req->mdp_ib_bw;
}
if ((mfd->panel_info.pdest == DISPLAY_1 &&
perf_req->use_ov_blt[0] && !perf_cur->use_ov_blt[0]) ||
dbg_force_ov0_blt) {
if (mfd->panel_info.type == LCDC_PANEL ||
mfd->panel_info.type == LVDS_PANEL)
mdp4_lcdc_overlay_blt_start(mfd);
else if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
mdp4_dsi_video_blt_start(mfd);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_blt_start(mfd);
pr_debug("%s mixer0 start blt [%d] from %d to %d.\n",
__func__,
flag,
perf_cur->use_ov_blt[0],
perf_req->use_ov_blt[0]);
perf_cur->use_ov_blt[0] = perf_req->use_ov_blt[0];
}
if ((mfd->panel_info.pdest == DISPLAY_2 &&
perf_req->use_ov_blt[1] && !perf_cur->use_ov_blt[1]) ||
dbg_force_ov1_blt) {
mdp4_dtv_overlay_blt_start(mfd);
pr_debug("%s mixer1 start blt [%d] from %d to %d.\n",
__func__,
flag,
perf_cur->use_ov_blt[1],
perf_req->use_ov_blt[1]);
perf_cur->use_ov_blt[1] = perf_req->use_ov_blt[1];
}
} else {
if (perf_req->mdp_clk_rate < perf_cur->mdp_clk_rate) {
pr_info("%s mdp clk is changed [%d] from %d to %d\n",
__func__,
flag,
perf_cur->mdp_clk_rate,
perf_req->mdp_clk_rate);
mdp_set_core_clk(perf_req->mdp_clk_rate);
perf_cur->mdp_clk_rate =
perf_req->mdp_clk_rate;
}
if (perf_req->mdp_ab_bw < perf_cur->mdp_ab_bw ||
perf_req->mdp_ib_bw < perf_cur->mdp_ib_bw) {
mdp_bus_scale_update_request
(perf_req->mdp_ab_bw, perf_req->mdp_ib_bw);
pr_debug("%s mdp ab bw is changed [%d] from %d to %d\n",
__func__,
flag,
perf_cur->mdp_ab_bw,
perf_req->mdp_ab_bw);
pr_debug("%s mdp ib bw is changed [%d] from %d to %d\n",
__func__,
flag,
perf_cur->mdp_ib_bw,
perf_req->mdp_ib_bw);
perf_cur->mdp_ab_bw = perf_req->mdp_ab_bw;
perf_cur->mdp_ib_bw = perf_req->mdp_ib_bw;
}
if ((mfd->panel_info.pdest == DISPLAY_1 &&
!perf_req->use_ov_blt[0] && perf_cur->use_ov_blt[0]) ||
dbg_force_ov0_blt) {
if (mfd->panel_info.type == LCDC_PANEL ||
mfd->panel_info.type == LVDS_PANEL)
mdp4_lcdc_overlay_blt_stop(mfd);
else if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
mdp4_dsi_video_blt_stop(mfd);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_blt_stop(mfd);
pr_debug("%s mixer0 stop blt [%d] from %d to %d.\n",
__func__,
flag,
perf_cur->use_ov_blt[0],
perf_req->use_ov_blt[0]);
perf_cur->use_ov_blt[0] = perf_req->use_ov_blt[0];
}
if ((mfd->panel_info.pdest == DISPLAY_2 &&
!perf_req->use_ov_blt[1] && perf_cur->use_ov_blt[1]) ||
dbg_force_ov1_blt) {
mdp4_dtv_overlay_blt_stop(mfd);
pr_debug("%s mixer1 stop blt [%d] from %d to %d.\n",
__func__,
flag,
perf_cur->use_ov_blt[1],
perf_req->use_ov_blt[1]);
perf_cur->use_ov_blt[1] = perf_req->use_ov_blt[1];
}
}
mutex_unlock(&perf_mutex);
return;
}
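/*
* get_img: resolve the memory behind a msmfb_data descriptor into a
* physical start address and length. Handles KGSL GEM buffers,
* framebuffer memory ids, ion buffers (via iommu mapping) and pmem,
* depending on the request flags and kernel configuration.
*/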
static int get_img(struct msmfb_data *img, struct fb_info *info,
struct mdp4_overlay_pipe *pipe, unsigned int plane,
unsigned long *start, unsigned long *len, struct file **srcp_file,
int *p_need, struct ion_handle **srcp_ihdl)
{
struct file *file;
int put_needed, ret = 0, fb_num;
#ifdef CONFIG_ANDROID_PMEM
unsigned long vstart;
#endif
*p_need = 0;
if (img->flags & MDP_BLIT_SRC_GEM) {
*srcp_file = NULL;
return kgsl_gem_obj_addr(img->memory_id, (int) img->priv,
start, len);
}
if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
file = fget_light(img->memory_id, &put_needed);
if (file == NULL)
return -EINVAL;
pipe->flags |= MDP_MEMORY_ID_TYPE_FB;
if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
if (get_fb_phys_info(start, len, fb_num,
DISPLAY_SUBSYSTEM_ID)) {
ret = -1;
} else {
*srcp_file = file;
*p_need = put_needed;
}
} else
ret = -1;
if (ret)
fput_light(file, put_needed);
return ret;
}
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
return mdp4_overlay_iommu_map_buf(img->memory_id, pipe, plane,
start, len, srcp_ihdl);
#endif
#ifdef CONFIG_ANDROID_PMEM
if (!get_pmem_file(img->memory_id, start, &vstart,
len, srcp_file))
return 0;
else
return -EINVAL;
#endif
}
#ifdef CONFIG_FB_MSM_MIPI_DSI
int mdp4_overlay_3d_sbys(struct fb_info *info, struct msmfb_overlay_3d *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
int ret = -EPERM;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
return -EINTR;
if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
mdp4_dsi_cmd_3d_sbys(mfd, req);
ret = 0;
} else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
mdp4_dsi_video_3d_sbys(mfd, req);
ret = 0;
}
mutex_unlock(&mfd->dma->ov_mutex);
return ret;
}
#else
int mdp4_overlay_3d_sbys(struct fb_info *info, struct msmfb_overlay_3d *req)
{
/* do nothing */
return -EPERM;
}
#endif
int mdp4_overlay_blt(struct fb_info *info, struct msmfb_overlay_blt *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
if (mfd == NULL)
return -ENODEV;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
return -EINTR;
if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_overlay_blt(mfd, req);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_overlay_blt(mfd, req);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_overlay_blt(mfd, req);
else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
mdp4_mddi_overlay_blt(mfd, req);
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req)
{
struct mdp4_overlay_pipe *pipe;
pipe = mdp4_overlay_ndx2pipe(req->id);
if (pipe == NULL)
return -ENODEV;
*req = pipe->req_data;
if (mdp4_overlay_borderfill_supported())
req->flags |= MDP_BORDERFILL_SUPPORTED;
return 0;
}
int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
int ret, mixer;
struct mdp4_overlay_pipe *pipe;
if (mfd == NULL) {
pr_err("%s: mfd == NULL, -ENODEV\n", __func__);
return -ENODEV;
}
if (info->node != 0 || mfd->cont_splash_done) /* primary */
if (!mfd->panel_power_on) /* suspended */
return -EPERM;
if (req->src.format == MDP_FB_FORMAT)
req->src.format = mfd->fb_imgType;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex)) {
pr_err("%s: mutex_lock_interruptible, -EINTR\n", __func__);
return -EINTR;
}
mixer = mfd->panel_info.pdest; /* DISPLAY_1 or DISPLAY_2 */
ret = mdp4_overlay_req2pipe(req, mixer, &pipe, mfd);
if (ret < 0) {
mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: mdp4_overlay_req2pipe, ret=%d\n", __func__, ret);
return ret;
}
#ifdef CONFIG_MACH_LGE
mdp4_calc_pipe_mdp_clk(mfd, pipe);
if (pipe->mixer_num == MDP4_MIXER0 &&
pipe->req_clk > mdp_max_clk &&
mdp4_overlay_format2type(pipe->src_format) == OVERLAY_TYPE_RGB) {
pr_err("%s: UI blt case, can't compose with MDP directly.\n", __func__);
if (req->id == MSMFB_NEW_REQUEST)
mdp4_overlay_pipe_free(pipe, 0);
mutex_unlock(&mfd->dma->ov_mutex);
return -EINVAL;
}
#endif
if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
mdp4_map_sec_resource(mfd);
}
/* return id back to user */
req->id = pipe->pipe_ndx; /* pipe_ndx starts from 1 */
pipe->req_data = *req; /* keep original req */
if (!IS_ERR_OR_NULL(mfd->iclient)) {
pr_debug("pipe->flags 0x%x\n", pipe->flags);
if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
mfd->mem_hid &= ~BIT(ION_IOMMU_HEAP_ID);
mfd->mem_hid |= ION_SECURE;
} else {
mfd->mem_hid |= BIT(ION_IOMMU_HEAP_ID);
mfd->mem_hid &= ~ION_SECURE;
}
}
mdp4_stat.overlay_set[pipe->mixer_num]++;
if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
if (pipe->pipe_num <= OVERLAY_PIPE_VG2)
memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
sizeof(struct mdp_overlay_pp_params));
else
pr_debug("%s: RGB Pipes don't support CSC/QSEED\n",
__func__);
}
mdp4_overlay_mdp_pipe_req(pipe, mfd);
#ifdef CONFIG_MACH_LGE
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT) || defined(CONFIG_FB_MSM_MIPI_HITACHI_VIDEO_HD_PT)
if (pipe->mixer_num == MDP4_MIXER0 &&
mdp4_overlay_format2type(pipe->src_format) == OVERLAY_TYPE_VIDEO) {
pr_debug("%s: video blt mode off, req_clk is max now.\n", __func__);
pipe->req_clk = mdp_max_clk;
}
#else
if (pipe->mixer_num == MDP4_MIXER0 &&
pipe->req_clk > mdp_max_clk &&
mdp4_overlay_format2type(pipe->src_format) == OVERLAY_TYPE_VIDEO) {
pr_debug("%s: video blt mode off, req_clk is max now.\n", __func__);
pipe->req_clk = mdp_max_clk;
}
#endif
#endif
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
int mdp4_overlay_unset_mixer(int mixer)
{
struct mdp4_overlay_pipe *pipe;
int i, cnt = 0;
/* free every staged pipe except the base layer pipe */
for (i = MDP4_MIXER_STAGE3; i > MDP4_MIXER_STAGE_BASE; i--) {
pipe = ctrl->stage[mixer][i];
if (pipe == NULL)
continue;
pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe, 1);
mdp4_overlay_pipe_free(pipe, 1);
cnt++;
}
return cnt;
}
int mdp4_overlay_unset(struct fb_info *info, int ndx)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct mdp4_overlay_pipe *pipe;
if (mfd == NULL)
return -ENODEV;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
return -EINTR;
pipe = mdp4_overlay_ndx2pipe(ndx);
if (pipe == NULL) {
mutex_unlock(&mfd->dma->ov_mutex);
return -ENODEV;
}
if (pipe->pipe_type == OVERLAY_TYPE_BF) {
mdp4_overlay_borderfill_stage_down(pipe);
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
if (pipe->mixer_num == MDP4_MIXER2)
ctrl->mixer2_played = 0;
else if (pipe->mixer_num == MDP4_MIXER1)
ctrl->mixer1_played = 0;
else {
/* mixer 0 */
ctrl->mixer0_played = 0;
if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
if (mfd->panel_power_on)
mdp4_mddi_blt_dmap_busy_wait(mfd);
}
}
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe, 0);
if (pipe->blt_forced) {
if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
pipe->blt_forced = 0;
pipe->req_clk = 0;
mdp4_overlay_mdp_perf_req(mfd);
}
}
if (pipe->mixer_num == MDP4_MIXER0) {
if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
if (mfd->panel_power_on)
mdp4_mddi_overlay_restore();
}
} else { /* mixer1, DTV, ATV */
if (ctrl->panel_mode & MDP4_PANEL_DTV)
mdp4_overlay_dtv_unset(mfd, pipe);
}
mdp4_stat.overlay_unset[pipe->mixer_num]++;
mdp4_overlay_pipe_free(pipe, 0);
mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
int mdp4_overlay_wait4vsync(struct fb_info *info)
{
if (!hdmi_prim_display && info->node == 0) {
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_wait4vsync(0);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_wait4vsync(0);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_wait4vsync(0);
} else if (hdmi_prim_display || info->node == 1) {
mdp4_dtv_wait4vsync(0);
}
return 0;
}
int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable)
{
int cmd;
if (enable)
cmd = 1;
else
cmd = 0;
if (!hdmi_prim_display && info->node == 0) {
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
mdp4_dsi_video_vsync_ctrl(info, cmd);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_cmd_vsync_ctrl(info, cmd);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_lcdc_vsync_ctrl(info, cmd);
} else if (hdmi_prim_display || info->node == 1)
mdp4_dtv_vsync_ctrl(info, cmd);
return 0;
}
struct tile_desc {
uint32 width; /* tile's width */
uint32 height; /* tile's height */
uint32 row_tile_w; /* tiles per row's width */
uint32 row_tile_h; /* tiles per row's height */
};
void tile_samsung(struct tile_desc *tp)
{
/*
* each row of a samsung tile consists of two tiles in height
* and two tiles in width, which means width should align to
* 64 x 2 bytes and height should align to 32 x 2 bytes.
* the video decoder generates two tiles in width and one tile
* in height, which ends up with height aligned to 32 x 1 bytes.
*/
tp->width = 64; /* 64 bytes */
tp->row_tile_w = 2; /* 2 tiles per row's width */
tp->height = 32; /* 32 bytes */
tp->row_tile_h = 1; /* 1 tile per row's height */
}
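/*
* tile_mem_size: number of macro tiles covering src_width x src_height
* times the macro tile size, rounded up to an 8K boundary; used by
* mdp4_overlay_play() to locate the second (chroma) plane of a
* supertiled frame.
*/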
uint32 tile_mem_size(struct mdp4_overlay_pipe *pipe, struct tile_desc *tp)
{
uint32 tile_w, tile_h;
uint32 row_num_w, row_num_h;
tile_w = tp->width * tp->row_tile_w;
tile_h = tp->height * tp->row_tile_h;
row_num_w = (pipe->src_width + tile_w - 1) / tile_w;
row_num_h = (pipe->src_height + tile_h - 1) / tile_h;
return ((row_num_w * row_num_h * tile_w * tile_h) + 8191) & ~8191;
}
int mdp4_overlay_play_wait(struct fb_info *info, struct msmfb_overlay_data *req)
{
return 0;
}
/*
* mdp4_overlay_dma_commit: called from dma_done isr
* No mutex/sleep allowed
*/
void mdp4_overlay_dma_commit(int mixer)
{
/*
* non double buffer register update here
* perf level, new clock rate should be done here
*/
}
/*
* mdp4_overlay_vsync_commit: called from tasklet context
* No mutex/sleep allowed
*/
void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe)
{
if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
else
mdp4_overlay_rgb_setup(pipe); /* rgb pipe */
pr_debug("%s: pipe=%x ndx=%d num=%d used=%d\n", __func__,
(int) pipe, pipe->pipe_ndx, pipe->pipe_num, pipe->pipe_used);
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe, 0);
}
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct msmfb_data *img;
struct mdp4_overlay_pipe *pipe;
ulong start, addr;
ulong len = 0;
struct ion_handle *srcp0_ihdl = NULL;
struct ion_handle *srcp1_ihdl = NULL, *srcp2_ihdl = NULL;
uint32_t overlay_version = 0;
int ret = 0;
if (mfd == NULL)
return -ENODEV;
pipe = mdp4_overlay_ndx2pipe(req->id);
if (pipe == NULL) {
mdp4_stat.err_play++;
return -ENODEV;
}
if (pipe->pipe_type == OVERLAY_TYPE_BF) {
mdp4_overlay_borderfill_stage_up(pipe);
mdp4_mixer_stage_commit(pipe->mixer_num);
return 0;
}
mutex_lock(&mfd->dma->ov_mutex);
img = &req->data;
get_img(img, info, pipe, 0, &start, &len, &pipe->srcp0_file,
&pipe->put0_need, &srcp0_ihdl);
if (len == 0) {
pr_err("%s: pmem Error\n", __func__);
ret = -1;
goto end;
}
addr = start + img->offset;
pipe->srcp0_addr = addr;
pipe->srcp0_ystride = pipe->src_width * pipe->bpp;
pr_debug("%s: mixer=%d ndx=%x addr=%x flags=%x pid=%d\n", __func__,
pipe->mixer_num, pipe->pipe_ndx, (int)addr, pipe->flags,
current->pid);
if ((req->version_key & VERSION_KEY_MASK) == 0xF9E8D700)
overlay_version = (req->version_key & ~VERSION_KEY_MASK);
if (pipe->fetch_plane == OVERLAY_PLANE_PSEUDO_PLANAR) {
if (overlay_version > 0) {
img = &req->plane1_data;
get_img(img, info, pipe, 1, &start, &len,
&pipe->srcp1_file, &pipe->put1_need,
&srcp1_ihdl);
if (len == 0) {
pr_err("%s: Error to get plane1\n", __func__);
ret = -EINVAL;
goto end;
}
pipe->srcp1_addr = start + img->offset;
} else if (pipe->frame_format ==
MDP4_FRAME_FORMAT_VIDEO_SUPERTILE) {
struct tile_desc tile;
tile_samsung(&tile);
pipe->srcp1_addr = addr + tile_mem_size(pipe, &tile);
} else {
pipe->srcp1_addr = addr + (pipe->src_width *
pipe->src_height);
}
pipe->srcp0_ystride = pipe->src_width;
if ((pipe->src_format == MDP_Y_CRCB_H1V1) ||
(pipe->src_format == MDP_Y_CBCR_H1V1) ||
(pipe->src_format == MDP_Y_CRCB_H1V2) ||
(pipe->src_format == MDP_Y_CBCR_H1V2)) {
if (pipe->src_width > YUV_444_MAX_WIDTH)
pipe->srcp1_ystride = pipe->src_width << 2;
else
pipe->srcp1_ystride = pipe->src_width << 1;
} else
pipe->srcp1_ystride = pipe->src_width;
} else if (pipe->fetch_plane == OVERLAY_PLANE_PLANAR) {
if (overlay_version > 0) {
img = &req->plane1_data;
get_img(img, info, pipe, 1, &start, &len,
&pipe->srcp1_file, &pipe->put1_need,
&srcp1_ihdl);
if (len == 0) {
pr_err("%s: Error to get plane1\n", __func__);
ret = -EINVAL;
goto end;
}
pipe->srcp1_addr = start + img->offset;
img = &req->plane2_data;
get_img(img, info, pipe, 2, &start, &len,
&pipe->srcp2_file, &pipe->put2_need,
&srcp2_ihdl);
if (len == 0) {
pr_err("%s: Error to get plane2\n", __func__);
ret = -EINVAL;
goto end;
}
pipe->srcp2_addr = start + img->offset;
} else {
if (pipe->src_format == MDP_Y_CR_CB_GH2V2) {
addr += (ALIGN(pipe->src_width, 16) *
pipe->src_height);
pipe->srcp1_addr = addr;
addr += ((ALIGN((pipe->src_width / 2), 16)) *
(pipe->src_height / 2));
pipe->srcp2_addr = addr;
} else {
addr += (pipe->src_width * pipe->src_height);
pipe->srcp1_addr = addr;
addr += ((pipe->src_width / 2) *
(pipe->src_height / 2));
pipe->srcp2_addr = addr;
}
}
/* mdp planar format expects Cb in srcp1 and Cr in p2 */
if ((pipe->src_format == MDP_Y_CR_CB_H2V2) ||
(pipe->src_format == MDP_Y_CR_CB_GH2V2))
swap(pipe->srcp1_addr, pipe->srcp2_addr);
if (pipe->src_format == MDP_Y_CR_CB_GH2V2) {
pipe->srcp0_ystride = ALIGN(pipe->src_width, 16);
pipe->srcp1_ystride = ALIGN(pipe->src_width / 2, 16);
pipe->srcp2_ystride = ALIGN(pipe->src_width / 2, 16);
} else {
pipe->srcp0_ystride = pipe->src_width;
pipe->srcp1_ystride = pipe->src_width / 2;
pipe->srcp2_ystride = pipe->src_width / 2;
}
}
mdp4_overlay_mdp_perf_req(mfd);
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT)
pipe->mfd = mfd;
#endif
if (pipe->mixer_num == MDP4_MIXER0) {
if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
/* cndx = 0 */
mdp4_dsi_cmd_pipe_queue(0, pipe);
} else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
/* cndx = 0 */
mdp4_dsi_video_pipe_queue(0, pipe);
} else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
/* cndx = 0 */
mdp4_lcdc_pipe_queue(0, pipe);
}
} else if (pipe->mixer_num == MDP4_MIXER1) {
if (ctrl->panel_mode & MDP4_PANEL_DTV)
mdp4_dtv_pipe_queue(0, pipe);/* cndx = 0 */
} else if (pipe->mixer_num == MDP4_MIXER2) {
ctrl->mixer2_played++;
if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK)
mdp4_wfd_pipe_queue(0, pipe);/* cndx = 0 */
}
if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
mdp4_iommu_unmap(pipe);
mdp4_stat.overlay_play[pipe->mixer_num]++;
end:
mutex_unlock(&mfd->dma->ov_mutex);
return ret;
}
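/*
* mdp4_overlay_commit: commit all staged pipes for the framebuffer's
* panel. Waits for the buffer fence, dispatches to the panel specific
* pipe_commit handler and signals the release fence timeline.
*/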
int mdp4_overlay_commit(struct fb_info *info)
{
int ret = 0, release_busy = true;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
int mixer;
if (mfd == NULL) {
ret = -ENODEV;
goto mdp4_overlay_commit_exit;
}
if (!mfd->panel_power_on) {
ret = -EINVAL;
goto mdp4_overlay_commit_exit;
}
mixer = mfd->panel_info.pdest; /* DISPLAY_1 or DISPLAY_2 */
if (mixer >= MDP4_MIXER_MAX)
return -EPERM;
mutex_lock(&mfd->dma->ov_mutex);
msm_fb_wait_for_fence(mfd);
switch (mfd->panel.type) {
case MIPI_CMD_PANEL:
mdp4_dsi_cmd_pipe_commit(0, 1, &release_busy);
break;
case MIPI_VIDEO_PANEL:
mdp4_dsi_video_pipe_commit(0, 1);
break;
case LCDC_PANEL:
mdp4_lcdc_pipe_commit(0, 1);
break;
case DTV_PANEL:
mdp4_dtv_pipe_commit(0, 1);
break;
case WRITEBACK_PANEL:
mdp4_wfd_pipe_commit(mfd, 0, 1);
break;
default:
pr_err("Panel Not Supported for Commit");
ret = -EINVAL;
break;
}
msm_fb_signal_timeline(mfd);
mdp4_unmap_sec_resource(mfd);
if (release_busy)
mutex_unlock(&mfd->dma->ov_mutex);
mdp4_overlay_commit_exit:
if (release_busy)
msm_fb_release_busy(mfd);
return ret;
}
void mdp4_overlay_commit_finish(struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
mdp4_overlay_mdp_perf_upd(mfd, 0);
}
struct msm_iommu_ctx {
char *name;
int domain;
};
static struct msm_iommu_ctx msm_iommu_ctx_names[] = {
/* Display read*/
{
.name = "mdp_port0_cb0",
.domain = DISPLAY_READ_DOMAIN,
},
/* Display read*/
{
.name = "mdp_port0_cb1",
.domain = DISPLAY_READ_DOMAIN,
},
/* Display write */
{
.name = "mdp_port1_cb0",
.domain = DISPLAY_READ_DOMAIN,
},
/* Display write */
{
.name = "mdp_port1_cb1",
.domain = DISPLAY_READ_DOMAIN,
},
};
static struct msm_iommu_ctx msm_iommu_split_ctx_names[] = {
/* Display read*/
{
.name = "mdp_port0_cb0",
.domain = DISPLAY_READ_DOMAIN,
},
/* Display read*/
{
.name = "mdp_port0_cb1",
.domain = DISPLAY_WRITE_DOMAIN,
},
/* Display write */
{
.name = "mdp_port1_cb0",
.domain = DISPLAY_READ_DOMAIN,
},
/* Display write */
{
.name = "mdp_port1_cb1",
.domain = DISPLAY_WRITE_DOMAIN,
},
};
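/*
* mdp4_iommu_attach: one-time attach of the mdp iommu context banks to
* their display domains; the context list depends on whether split
* read/write domains are in use.
*/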
void mdp4_iommu_attach(void)
{
static int done;
struct msm_iommu_ctx *ctx_names;
struct iommu_domain *domain;
int i, arr_size;
if (!done) {
if (mdp_iommu_split_domain) {
ctx_names = msm_iommu_split_ctx_names;
arr_size = ARRAY_SIZE(msm_iommu_split_ctx_names);
} else {
ctx_names = msm_iommu_ctx_names;
arr_size = ARRAY_SIZE(msm_iommu_ctx_names);
}
for (i = 0; i < arr_size; i++) {
int domain_idx;
struct device *ctx = msm_iommu_get_ctx(
ctx_names[i].name);
if (!ctx)
continue;
domain_idx = ctx_names[i].domain;
domain = msm_get_iommu_domain(domain_idx);
if (!domain)
continue;
if (iommu_attach_device(domain, ctx)) {
WARN(1, "%s: could not attach domain %d to context %s."
" iommu programming will not occur.\n",
__func__, domain_idx,
ctx_names[i].name);
continue;
}
}
done = 1;
}
}
int mdp4_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req,
struct mdp4_overlay_pipe **ppipe)
{
struct mdp4_overlay_pipe *pipe;
int err;
struct msm_fb_data_type *mfb = info->par;
req->z_order = 0;
req->id = MSMFB_NEW_REQUEST;
req->is_fg = false;
req->alpha = 0xff;
err = mdp4_overlay_req2pipe(req, MDP4_MIXER0, &pipe, mfb);
if (err < 0) {
pr_err("%s:Could not allocate MDP overlay pipe\n", __func__);
return err;
}
mdp4_mixer_blend_setup(pipe->mixer_num);
*ppipe = pipe;
return 0;
}
void mdp4_v4l2_overlay_clear(struct mdp4_overlay_pipe *pipe)
{
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe, 1);
mdp4_overlay_pipe_free(pipe, 1);
}
int mdp4_v4l2_overlay_play(struct fb_info *info, struct mdp4_overlay_pipe *pipe,
unsigned long srcp0_addr, unsigned long srcp1_addr,
unsigned long srcp2_addr)
{
struct msm_fb_data_type *mfd = info->par;
int err;
if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
return -EINTR;
switch (pipe->src_format) {
case MDP_Y_CR_CB_H2V2:
/* YUV420 */
pipe->srcp0_addr = srcp0_addr;
pipe->srcp0_ystride = pipe->src_width;
/*
* For YUV420, the luma plane is 1 byte per pixel times
* the number of pixels in the image. Also, the planes are
* switched in MDP: srcp2 is actually the first chroma plane.
*/
pipe->srcp2_addr = srcp1_addr ? srcp1_addr :
pipe->srcp0_addr + (pipe->src_width * pipe->src_height);
pipe->srcp2_ystride = pipe->src_width/2;
/*
* The chroma planes are half the size of the luma
* planes
*/
pipe->srcp1_addr = srcp2_addr ? srcp2_addr :
pipe->srcp2_addr +
(pipe->src_width * pipe->src_height / 4);
pipe->srcp1_ystride = pipe->src_width/2;
break;
case MDP_Y_CRCB_H2V2:
/* NV12 */
pipe->srcp0_addr = srcp0_addr;
pipe->srcp0_ystride = pipe->src_width;
pipe->srcp1_addr = srcp1_addr ? srcp1_addr :
pipe->srcp0_addr +
(pipe->src_width * pipe->src_height);
pipe->srcp1_ystride = pipe->src_width;
break;
default:
pr_err("%s: format (%u) is not supported\n", __func__,
pipe->src_format);
err = -EINVAL;
goto done;
}
pr_debug("%s: pipe ndx=%d stage=%d format=%x\n", __func__,
pipe->pipe_ndx, pipe->mixer_stage, pipe->src_format);
if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
mdp4_overlay_vg_setup(pipe);
else
mdp4_overlay_rgb_setup(pipe);
if (ctrl->panel_mode & MDP4_PANEL_LCDC)
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe, 0); /* mixer stage commit commits this */
mdp4_mixer_stage_commit(pipe->mixer_num);
#ifdef V4L2_VSYNC
/*
* TODO: incorporate v4l2 into vsycn driven mechanism
*/
if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
mdp4_overlay_lcdc_vsync_push(mfd, pipe);
} else {
#ifdef CONFIG_FB_MSM_MIPI_DSI
if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
mdp4_dsi_cmd_dma_busy_wait(mfd);
mdp4_dsi_cmd_kickoff_video(mfd, pipe);
}
#else
if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
mdp4_mddi_dma_busy_wait(mfd);
mdp4_mddi_kickoff_video(mfd, pipe);
}
#endif
}
#endif
done:
mutex_unlock(&mfd->dma->ov_mutex);
return err;
}
int mdp4_overlay_reset(void)
{
memset(&perf_request, 0, sizeof(perf_request));
memset(&perf_current, 0, sizeof(perf_current));
return 0;
}
| OlegKyiashko/LGOGP-kernel | drivers/video/msm/mdp4_overlay.c | C | gpl-2.0 | 112,817 |
/*
* linux/drivers/mmc/core/core.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
* MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
* Copyright (C) 2016 XiaoMi, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/jiffies.h>
#include <trace/events/mmc.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
/*
* Background operations can take a long time, depending on the housekeeping
* operations the card has to perform.
*/
#define MMC_BKOPS_MAX_TIMEOUT (30 * 1000) /* max time to wait in ms */
static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
* performance cost, and for other reasons may not always be desired.
* So we allow it to be disabled.
*/
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
static int mmc_schedule_delayed_work(struct delayed_work *work,
unsigned long delay)
{
return queue_delayed_work(workqueue, work, delay);
}
/*
* Internal function. Flush all scheduled work from the MMC work queue.
*/
static void mmc_flush_scheduled_work(void)
{
flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST
/*
* Internal function. Inject random data errors.
* If mmc_data is NULL no errors are injected.
*/
static void mmc_should_fail_request(struct mmc_host *host,
struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = mrq->data;
static const int data_errors[] = {
-ETIMEDOUT,
-EILSEQ,
-EIO,
};
if (!data)
return;
if (cmd->error || data->error ||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
data->fault_injected = true;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
static inline void mmc_should_fail_request(struct mmc_host *host,
struct mmc_request *mrq)
{
}
#endif /* CONFIG_FAIL_MMC_REQUEST */
static bool mmc_is_data_request(struct mmc_request *mmc_request)
{
switch (mmc_request->cmd->opcode) {
case MMC_READ_SINGLE_BLOCK:
case MMC_READ_MULTIPLE_BLOCK:
case MMC_WRITE_BLOCK:
case MMC_WRITE_MULTIPLE_BLOCK:
return true;
default:
return false;
}
}
static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
if (!clk_scaling->enable)
return;
if (lock_needed)
spin_lock_bh(&clk_scaling->lock);
clk_scaling->start_busy = ktime_get();
clk_scaling->is_busy_started = true;
if (lock_needed)
spin_unlock_bh(&clk_scaling->lock);
}
static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
{
struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
if (!clk_scaling->enable)
return;
if (lock_needed)
spin_lock_bh(&clk_scaling->lock);
if (!clk_scaling->is_busy_started) {
WARN_ON(1);
goto out;
}
clk_scaling->total_busy_time_us +=
ktime_to_us(ktime_sub(ktime_get(),
clk_scaling->start_busy));
pr_debug("%s: accumulated busy time is %lu usec\n",
mmc_hostname(host), clk_scaling->total_busy_time_us);
clk_scaling->is_busy_started = false;
out:
if (lock_needed)
spin_unlock_bh(&clk_scaling->lock);
}
/**
* mmc_cmdq_clk_scaling_start_busy() - start busy timer for data requests
* @host: pointer to mmc host structure
* @lock_needed: flag indication if locking is needed
*
* This function starts the busy timer in case it was not already started.
*/
void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
bool lock_needed)
{
if (!host->clk_scaling.enable)
return;
if (lock_needed)
spin_lock_bh(&host->clk_scaling.lock);
if (!host->clk_scaling.is_busy_started &&
!test_bit(CMDQ_STATE_DCMD_ACTIVE,
&host->cmdq_ctx.curr_state)) {
host->clk_scaling.start_busy = ktime_get();
host->clk_scaling.is_busy_started = true;
}
if (lock_needed)
spin_unlock_bh(&host->clk_scaling.lock);
}
EXPORT_SYMBOL(mmc_cmdq_clk_scaling_start_busy);
/**
* mmc_cmdq_clk_scaling_stop_busy() - stop busy timer for last data requests
* @host: pointer to mmc host structure
* @lock_needed: flag indication if locking is needed
*
* This function stops the busy timer in case it is the last data request.
* In case the current request is not the last one, the busy time till
* now will be accumulated and the counter will be restarted.
*/
void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd)
{
if (!host->clk_scaling.enable)
return;
if (lock_needed)
spin_lock_bh(&host->clk_scaling.lock);
/*
* For CQ mode: In completion of DCMD request, start busy time in
* case of pending data requests
*/
if (is_cmdq_dcmd) {
if (host->cmdq_ctx.data_active_reqs) {
host->clk_scaling.is_busy_started = true;
host->clk_scaling.start_busy = ktime_get();
}
goto out;
}
host->clk_scaling.total_busy_time_us +=
ktime_to_us(ktime_sub(ktime_get(),
host->clk_scaling.start_busy));
if (host->cmdq_ctx.data_active_reqs) {
host->clk_scaling.is_busy_started = true;
host->clk_scaling.start_busy = ktime_get();
} else {
host->clk_scaling.is_busy_started = false;
}
out:
if (lock_needed)
spin_unlock_bh(&host->clk_scaling.lock);
}
EXPORT_SYMBOL(mmc_cmdq_clk_scaling_stop_busy);
/**
* mmc_can_scale_clk() - Check clock scaling capability
* @host: pointer to mmc host structure
*/
bool mmc_can_scale_clk(struct mmc_host *host)
{
if (!host) {
pr_err("bad host parameter\n");
WARN_ON(1);
return false;
}
return host->caps2 & MMC_CAP2_CLK_SCALE;
}
EXPORT_SYMBOL(mmc_can_scale_clk);
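/*
* devfreq get_dev_status callback: report the busy/total time of the last
* measurement interval and the current frequency, then restart the
* interval.
*/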
static int mmc_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
struct mmc_devfeq_clk_scaling *clk_scaling;
if (!host) {
pr_err("bad host parameter\n");
WARN_ON(1);
return -EINVAL;
}
clk_scaling = &host->clk_scaling;
if (!clk_scaling->enable)
return 0;
spin_lock_bh(&clk_scaling->lock);
/* accumulate the busy time of ongoing work */
memset(status, 0, sizeof(*status));
if (clk_scaling->is_busy_started) {
if (mmc_card_cmdq(host->card)) {
/* the "busy-timer" will be restarted in case there
* are pending data requests */
mmc_cmdq_clk_scaling_stop_busy(host, false, false);
} else {
mmc_clk_scaling_stop_busy(host, false);
mmc_clk_scaling_start_busy(host, false);
}
}
status->busy_time = clk_scaling->total_busy_time_us;
status->total_time = ktime_to_us(ktime_sub(ktime_get(),
clk_scaling->measure_interval_start));
clk_scaling->total_busy_time_us = 0;
status->current_frequency = clk_scaling->curr_freq;
clk_scaling->measure_interval_start = ktime_get();
pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
mmc_hostname(host),
(status->busy_time*100)/status->total_time,
status->total_time, status->busy_time,
status->current_frequency);
spin_unlock_bh(&clk_scaling->lock);
return 0;
}
static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
{
struct mmc_card *card = host->card;
u32 status;
/*
* If the current partition type is RPMB, clock switching may not
* work properly as sending tuning command (CMD21) is illegal in
* this mode.
*/
if (!card || (mmc_card_mmc(card) &&
(card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB ||
mmc_card_doing_bkops(card))))
return false;
if (mmc_send_status(card, &status)) {
pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
return false;
}
return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}
int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
{
int err = 0;
err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
(!host->cmdq_ctx.active_reqs));
if (host->cmdq_ctx.active_reqs) {
pr_err("%s: %s: unexpected active requests (%lu)\n",
mmc_hostname(host), __func__,
host->cmdq_ctx.active_reqs);
return -EPERM;
}
err = mmc_cmdq_halt(host, true);
if (err) {
pr_err("%s: %s: mmc_cmdq_halt failed (%d)\n",
mmc_hostname(host), __func__, err);
goto out;
}
out:
return err;
}
EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
int mmc_clk_update_freq(struct mmc_host *host,
unsigned long freq, enum mmc_load state)
{
int err = 0;
bool cmdq_mode;
if (!host) {
pr_err("bad host parameter\n");
WARN_ON(1);
return -EINVAL;
}
mmc_host_clk_hold(host);
cmdq_mode = mmc_card_cmdq(host->card);
/* make sure the card supports the frequency we want */
if (unlikely(freq > host->card->clk_scaling_highest)) {
freq = host->card->clk_scaling_highest;
pr_warn("%s: %s: frequency was overridden to %lu\n",
mmc_hostname(host), __func__,
host->card->clk_scaling_highest);
}
if (unlikely(freq < host->card->clk_scaling_lowest)) {
freq = host->card->clk_scaling_lowest;
pr_warn("%s: %s: frequency was overridden to %lu\n",
mmc_hostname(host), __func__,
host->card->clk_scaling_lowest);
}
if (freq == host->clk_scaling.curr_freq)
goto out;
if (host->ops->notify_load) {
err = host->ops->notify_load(host, state);
if (err) {
pr_err("%s: %s: fail on notify_load\n",
mmc_hostname(host), __func__);
goto out;
}
}
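	/*
	 * Halt the command queue engine first so that legacy commands (such
	 * as tuning) can be issued while the bus speed is being changed.
	 */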
if (cmdq_mode) {
err = mmc_cmdq_halt_on_empty_queue(host);
if (err) {
pr_err("%s: %s: failed halting queue (%d)\n",
mmc_hostname(host), __func__, err);
goto halt_failed;
}
}
if (!mmc_is_valid_state_for_clk_scaling(host)) {
pr_debug("%s: invalid state for clock scaling - skipping",
mmc_hostname(host));
goto invalid_state;
}
err = host->bus_ops->change_bus_speed(host, &freq);
if (!err)
host->clk_scaling.curr_freq = freq;
else
pr_err("%s: %s: failed (%d) at freq=%lu\n",
mmc_hostname(host), __func__, err, freq);
invalid_state:
if (cmdq_mode) {
if (mmc_cmdq_halt(host, false))
pr_err("%s: %s: cmdq unhalt failed\n",
mmc_hostname(host), __func__);
}
halt_failed:
if (err) {
/* restore previous state */
if (host->ops->notify_load)
if (host->ops->notify_load(host,
host->clk_scaling.state))
pr_err("%s: %s: fail on notify_load restore\n",
mmc_hostname(host), __func__);
}
out:
mmc_host_clk_release(host);
return err;
}
EXPORT_SYMBOL(mmc_clk_update_freq);
static int mmc_devfreq_set_target(struct device *dev,
unsigned long *freq, u32 devfreq_flags)
{
struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
struct mmc_devfeq_clk_scaling *clk_scaling;
int err = 0;
int abort;
if (!(host && freq)) {
pr_err("%s: unexpected host/freq parameter\n", __func__);
err = -EINVAL;
goto out;
}
clk_scaling = &host->clk_scaling;
if (!clk_scaling->enable)
goto out;
pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
*freq, current->comm);
if ((clk_scaling->curr_freq == *freq) ||
clk_scaling->skip_clk_scale_freq_update)
goto out;
/* No need to scale the clocks if they are gated */
if (!host->ios.clock)
goto out;
spin_lock_bh(&clk_scaling->lock);
if (clk_scaling->clk_scaling_in_progress) {
pr_debug("%s: clocks scaling is already in-progress by mmc thread\n",
mmc_hostname(host));
spin_unlock_bh(&clk_scaling->lock);
goto out;
}
clk_scaling->need_freq_change = true;
clk_scaling->target_freq = *freq;
clk_scaling->state = *freq < clk_scaling->curr_freq ?
MMC_LOAD_LOW : MMC_LOAD_HIGH;
spin_unlock_bh(&clk_scaling->lock);
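	/*
	 * Claim the host before changing the frequency. If devfreq_abort is
	 * raised (the data path picked the change up via
	 * mmc_deferred_scaling(), or scaling is being suspended), bail out
	 * and let that path handle it.
	 */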
abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
if (abort)
goto out;
/*
	 * Since we were able to claim the host, there is no need to
	 * defer the frequency change. It will be done now.
*/
clk_scaling->need_freq_change = false;
mmc_host_clk_hold(host);
err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
if (err && err != -EAGAIN)
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
else
pr_debug("%s: clock change to %lu finished successfully (%s)\n",
mmc_hostname(host), *freq, current->comm);
mmc_host_clk_release(host);
mmc_release_host(host);
out:
return err;
}
/**
* mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
* @host: pointer to mmc host structure
*
 * This function performs clock scaling if the "need_freq_change" flag was
 * set by the clock scaling logic.
*/
void mmc_deferred_scaling(struct mmc_host *host)
{
unsigned long target_freq;
int err;
if (!host->clk_scaling.enable)
return;
spin_lock_bh(&host->clk_scaling.lock);
if (host->clk_scaling.clk_scaling_in_progress ||
!(host->clk_scaling.need_freq_change)) {
spin_unlock_bh(&host->clk_scaling.lock);
return;
}
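	/*
	 * Raise devfreq_abort so a concurrent devfreq-initiated change backs
	 * off while the frequency is updated from the data path.
	 */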
atomic_inc(&host->clk_scaling.devfreq_abort);
target_freq = host->clk_scaling.target_freq;
host->clk_scaling.clk_scaling_in_progress = true;
host->clk_scaling.need_freq_change = false;
spin_unlock_bh(&host->clk_scaling.lock);
pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
mmc_hostname(host),
target_freq, current->comm);
err = mmc_clk_update_freq(host, target_freq,
host->clk_scaling.state);
if (err && err != -EAGAIN)
pr_err("%s: failed on deferred scale clocks (%d)\n",
mmc_hostname(host), err);
else
pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
mmc_hostname(host),
target_freq, current->comm);
host->clk_scaling.clk_scaling_in_progress = false;
atomic_dec(&host->clk_scaling.devfreq_abort);
}
EXPORT_SYMBOL(mmc_deferred_scaling);
static int mmc_devfreq_create_freq_table(struct mmc_host *host)
{
int i;
struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
mmc_hostname(host),
host->card->clk_scaling_lowest,
host->card->clk_scaling_highest);
if (!clk_scaling->freq_table) {
pr_debug("%s: no frequency table defined - setting default\n",
mmc_hostname(host));
clk_scaling->freq_table = kzalloc(
2*sizeof(*(clk_scaling->freq_table)), GFP_KERNEL);
if (!clk_scaling->freq_table)
return -ENOMEM;
clk_scaling->freq_table[0] = host->card->clk_scaling_lowest;
clk_scaling->freq_table[1] = host->card->clk_scaling_highest;
clk_scaling->freq_table_sz = 2;
goto out;
}
if (host->card->clk_scaling_lowest >
clk_scaling->freq_table[0])
pr_debug("%s: frequency table undershot possible freq\n",
mmc_hostname(host));
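	/*
	 * Vendor-specific quirk: pin the first table entry for the host
	 * named "mmc1" to the card's highest supported frequency, which
	 * effectively keeps that slot at full speed.
	 */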
if (strcmp(mmc_hostname(host), "mmc1") == 0) {
clk_scaling->freq_table[0] = host->card->clk_scaling_highest;
} else {
for (i = 0; i < clk_scaling->freq_table_sz; i++) {
if (clk_scaling->freq_table[i] < host->card->clk_scaling_highest) {
continue;
} else {
break;
}
}
clk_scaling->freq_table[i] = host->card->clk_scaling_highest;
clk_scaling->freq_table_sz = i + 1;
}
out:
clk_scaling->devfreq_profile.freq_table = clk_scaling->freq_table;
clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;
for (i = 0; i < clk_scaling->freq_table_sz; i++)
pr_debug("%s: freq[%d] = %u\n",
mmc_hostname(host), i, clk_scaling->freq_table[i]);
return 0;
}
/**
* mmc_init_devfreq_clk_scaling() - Initialize clock scaling
* @host: pointer to mmc host structure
*
 * Initialize clock scaling for supported hosts. It is assumed that the caller
 * ensures the clock is running at the maximum possible frequency before
 * calling this function. The simple_ondemand governor is configured through
 * struct devfreq_simple_ondemand_data.
*/
int mmc_init_clk_scaling(struct mmc_host *host)
{
int err;
if (!host || !host->card) {
pr_err("%s: unexpected host/card parameters\n",
__func__);
return -EINVAL;
}
if (!mmc_can_scale_clk(host) ||
!host->bus_ops->change_bus_speed) {
pr_debug("%s: clock scaling is not supported\n",
mmc_hostname(host));
return 0;
}
pr_debug("registering %s dev (%p) to devfreq",
mmc_hostname(host),
mmc_classdev(host));
if (host->clk_scaling.devfreq) {
pr_err("%s: dev is already registered for dev %p\n",
mmc_hostname(host),
mmc_dev(host));
return -EPERM;
}
spin_lock_init(&host->clk_scaling.lock);
atomic_set(&host->clk_scaling.devfreq_abort, 0);
host->clk_scaling.curr_freq = host->ios.clock;
host->clk_scaling.clk_scaling_in_progress = false;
host->clk_scaling.need_freq_change = false;
host->clk_scaling.is_busy_started = false;
host->clk_scaling.devfreq_profile.polling_ms =
host->clk_scaling.polling_delay_ms;
host->clk_scaling.devfreq_profile.get_dev_status =
mmc_devfreq_get_dev_status;
host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target;
host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock;
host->clk_scaling.ondemand_gov_data.simple_scaling = true;
host->clk_scaling.ondemand_gov_data.upthreshold =
host->clk_scaling.upthreshold;
host->clk_scaling.ondemand_gov_data.downdifferential =
host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;
err = mmc_devfreq_create_freq_table(host);
if (err) {
pr_err("%s: fail to create devfreq frequency table\n",
mmc_hostname(host));
return err;
}
pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
mmc_hostname(host),
host->clk_scaling.ondemand_gov_data.upthreshold,
host->clk_scaling.ondemand_gov_data.downdifferential,
host->clk_scaling.devfreq_profile.polling_ms);
host->clk_scaling.devfreq = devfreq_add_device(
mmc_classdev(host),
&host->clk_scaling.devfreq_profile,
"simple_ondemand",
&host->clk_scaling.ondemand_gov_data);
	if (IS_ERR(host->clk_scaling.devfreq)) {
		pr_err("%s: unable to register with devfreq\n",
			mmc_hostname(host));
		return PTR_ERR(host->clk_scaling.devfreq);
	}
pr_debug("%s: clk scaling is enabled for device %s (%p) with devfreq %p (clock = %uHz)\n",
mmc_hostname(host),
dev_name(mmc_classdev(host)),
mmc_classdev(host),
host->clk_scaling.devfreq,
host->ios.clock);
host->clk_scaling.enable = true;
return err;
}
EXPORT_SYMBOL(mmc_init_clk_scaling);
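/*
 * Illustrative usage sketch (not taken from an actual caller in this file):
 * a bus driver would typically enable scaling once the card is initialized
 * and running at its maximum frequency, e.g.
 *
 *	err = mmc_init_clk_scaling(host);
 *	if (err)
 *		pr_err("%s: failed to initialize clock scaling (%d)\n",
 *		       mmc_hostname(host), err);
 */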
/**
* mmc_suspend_clk_scaling() - suspend clock scaling
* @host: pointer to mmc host structure
*
* This API will suspend devfreq feature for the specific host.
* The statistics collected by mmc will be cleared.
* This function is intended to be called by the pm callbacks
* (e.g. runtime_suspend, suspend) of the mmc device
*/
int mmc_suspend_clk_scaling(struct mmc_host *host)
{
int err;
if (!host) {
WARN(1, "bad host parameter\n");
return -EINVAL;
}
if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable)
return 0;
if (!host->clk_scaling.devfreq) {
pr_err("%s: %s: no devfreq is assosiated with this device\n",
mmc_hostname(host), __func__);
return -EPERM;
}
atomic_inc(&host->clk_scaling.devfreq_abort);
wake_up(&host->wq);
err = devfreq_suspend_device(host->clk_scaling.devfreq);
if (err) {
pr_err("%s: %s: failed to suspend devfreq\n",
mmc_hostname(host), __func__);
return err;
}
host->clk_scaling.enable = false;
host->clk_scaling.total_busy_time_us = 0;
pr_debug("%s: devfreq suspended\n", mmc_hostname(host));
return 0;
}
EXPORT_SYMBOL(mmc_suspend_clk_scaling);
/**
* mmc_resume_clk_scaling() - resume clock scaling
* @host: pointer to mmc host structure
*
* This API will resume devfreq feature for the specific host.
* This API is intended to be called by the pm callbacks
 * (e.g. runtime_resume, resume) of the mmc device
*/
int mmc_resume_clk_scaling(struct mmc_host *host)
{
int err = 0;
u32 max_clk_idx = 0;
u32 devfreq_max_clk = 0;
u32 devfreq_min_clk = 0;
if (!host) {
WARN(1, "bad host parameter\n");
return -EINVAL;
}
if (!mmc_can_scale_clk(host))
return 0;
if (!host->clk_scaling.devfreq) {
pr_err("%s: %s: no devfreq is assosiated with this device\n",
mmc_hostname(host), __func__);
return -EPERM;
}
atomic_set(&host->clk_scaling.devfreq_abort, 0);
max_clk_idx = host->clk_scaling.freq_table_sz - 1;
devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
devfreq_min_clk = host->clk_scaling.freq_table[0];
host->clk_scaling.curr_freq = devfreq_max_clk;
if (host->ios.clock < host->card->clk_scaling_highest)
host->clk_scaling.curr_freq = devfreq_min_clk;
host->clk_scaling.clk_scaling_in_progress = false;
host->clk_scaling.need_freq_change = false;
err = devfreq_resume_device(host->clk_scaling.devfreq);
if (err) {
pr_err("%s: %s: failed to resume devfreq (%d)\n",
mmc_hostname(host), __func__, err);
} else {
host->clk_scaling.enable = true;
pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
}
return err;
}
EXPORT_SYMBOL(mmc_resume_clk_scaling);
/**
* mmc_exit_devfreq_clk_scaling() - Disable clock scaling
* @host: pointer to mmc host structure
*
* Disable clock scaling permanently.
*/
int mmc_exit_clk_scaling(struct mmc_host *host)
{
int err;
if (!host) {
pr_err("%s: bad host parameter\n", __func__);
WARN_ON(1);
return -EINVAL;
}
if (!mmc_can_scale_clk(host))
return 0;
if (!host->clk_scaling.devfreq) {
pr_err("%s: %s: no devfreq is assosiated with this device\n",
mmc_hostname(host), __func__);
return -EPERM;
}
err = mmc_suspend_clk_scaling(host);
if (err) {
pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
mmc_hostname(host), __func__, err);
return err;
}
err = devfreq_remove_device(host->clk_scaling.devfreq);
if (err) {
pr_err("%s: remove devfreq failed (%d)\n",
mmc_hostname(host), err);
return err;
}
host->clk_scaling.devfreq = NULL;
atomic_set(&host->clk_scaling.devfreq_abort, 1);
pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
return 0;
}
EXPORT_SYMBOL(mmc_exit_clk_scaling);
/**
* mmc_request_done - finish processing an MMC request
* @host: MMC host which completed request
 * @mrq: MMC request which completed
*
* MMC drivers should call this function when they have completed
* their processing of a request.
*/
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
int err = cmd->error;
#ifdef CONFIG_MMC_PERF_PROFILING
ktime_t diff;
#endif
if (host->clk_scaling.is_busy_started)
mmc_clk_scaling_stop_busy(host, true);
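	/*
	 * In SPI mode an "illegal command" response means retrying cannot
	 * succeed, so drop the remaining retries.
	 */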
if (err && cmd->retries && mmc_host_is_spi(host)) {
if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
cmd->retries = 0;
}
if (err && cmd->retries && !mmc_card_removed(host->card)) {
/*
* Request starter must handle retries - see
* mmc_wait_for_req_done().
*/
if (mrq->done)
mrq->done(mrq);
} else {
mmc_should_fail_request(host, mrq);
led_trigger_event(host->led, LED_OFF);
pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
mmc_hostname(host), cmd->opcode, err,
cmd->resp[0], cmd->resp[1],
cmd->resp[2], cmd->resp[3]);
if (mrq->data) {
#ifdef CONFIG_MMC_PERF_PROFILING
if (host->perf_enable) {
diff = ktime_sub(ktime_get(), host->perf.start);
if (mrq->data->flags == MMC_DATA_READ) {
host->perf.rbytes_drv +=
mrq->data->bytes_xfered;
host->perf.rtime_drv =
ktime_add(host->perf.rtime_drv,
diff);
} else {
host->perf.wbytes_drv +=
mrq->data->bytes_xfered;
host->perf.wtime_drv =
ktime_add(host->perf.wtime_drv,
diff);
}
}
#endif
pr_debug("%s: %d bytes transferred: %d\n",
mmc_hostname(host),
mrq->data->bytes_xfered, mrq->data->error);
trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
}
if (mrq->stop) {
pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
mmc_hostname(host), mrq->stop->opcode,
mrq->stop->error,
mrq->stop->resp[0], mrq->stop->resp[1],
mrq->stop->resp[2], mrq->stop->resp[3]);
}
if (mrq->done)
mrq->done(mrq);
mmc_host_clk_release(host);
}
}
EXPORT_SYMBOL(mmc_request_done);
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
unsigned int i, sz;
struct scatterlist *sg;
#endif
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
mmc_hostname(host), mrq->sbc->opcode,
mrq->sbc->arg, mrq->sbc->flags);
}
pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
mmc_hostname(host), mrq->cmd->opcode,
mrq->cmd->arg, mrq->cmd->flags);
if (mrq->data) {
pr_debug("%s: blksz %d blocks %d flags %08x "
"tsac %d ms nsac %d\n",
mmc_hostname(host), mrq->data->blksz,
mrq->data->blocks, mrq->data->flags,
mrq->data->timeout_ns / 1000000,
mrq->data->timeout_clks);
}
if (mrq->stop) {
pr_debug("%s: CMD%u arg %08x flags %08x\n",
mmc_hostname(host), mrq->stop->opcode,
mrq->stop->arg, mrq->stop->flags);
}
WARN_ON(!host->claimed);
mrq->cmd->error = 0;
mrq->cmd->mrq = mrq;
if (mrq->data) {
BUG_ON(mrq->data->blksz > host->max_blk_size);
BUG_ON(mrq->data->blocks > host->max_blk_count);
BUG_ON(mrq->data->blocks * mrq->data->blksz >
host->max_req_size);
#ifdef CONFIG_MMC_DEBUG
sz = 0;
for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
sz += sg->length;
BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif
mrq->cmd->data = mrq->data;
mrq->data->error = 0;
mrq->data->mrq = mrq;
if (mrq->stop) {
mrq->data->stop = mrq->stop;
mrq->stop->error = 0;
mrq->stop->mrq = mrq;
}
#ifdef CONFIG_MMC_PERF_PROFILING
if (host->perf_enable)
host->perf.start = ktime_get();
#endif
}
mmc_host_clk_hold(host);
led_trigger_event(host->led, LED_FULL);
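	/*
	 * For data requests, apply any pending deferred clock change and
	 * open the busy-time accounting window used by clock scaling.
	 */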
if (mmc_is_data_request(mrq)) {
mmc_deferred_scaling(host);
mmc_clk_scaling_start_busy(host, true);
}
host->ops->request(host, mrq);
}
static void mmc_start_cmdq_request(struct mmc_host *host,
struct mmc_request *mrq)
{
if (mrq->data) {
pr_debug("%s: blksz %d blocks %d flags %08x tsac %lu ms nsac %d\n",
mmc_hostname(host), mrq->data->blksz,
mrq->data->blocks, mrq->data->flags,
mrq->data->timeout_ns / NSEC_PER_MSEC,
mrq->data->timeout_clks);
BUG_ON(mrq->data->blksz > host->max_blk_size);
BUG_ON(mrq->data->blocks > host->max_blk_count);
BUG_ON(mrq->data->blocks * mrq->data->blksz >
host->max_req_size);
mrq->data->error = 0;
mrq->data->mrq = mrq;
}
if (mrq->cmd) {
mrq->cmd->error = 0;
mrq->cmd->mrq = mrq;
}
mmc_host_clk_hold(host);
if (likely(host->cmdq_ops->request))
host->cmdq_ops->request(host, mrq);
else
pr_err("%s: %s: issue request failed\n", mmc_hostname(host),
__func__);
}
/**
* mmc_blk_init_bkops_statistics - initialize bkops statistics
* @card: MMC card to start BKOPS
*
* Initialize and enable the bkops statistics
*/
void mmc_blk_init_bkops_statistics(struct mmc_card *card)
{
int i;
struct mmc_bkops_stats *stats;
if (!card)
return;
stats = &card->bkops.stats;
spin_lock(&stats->lock);
stats->manual_start = 0;
stats->hpi = 0;
stats->auto_start = 0;
stats->auto_stop = 0;
	for (i = 0; i < MMC_BKOPS_NUM_SEVERITY_LEVELS; i++)
stats->level[i] = 0;
stats->enabled = true;
spin_unlock(&stats->lock);
}
EXPORT_SYMBOL(mmc_blk_init_bkops_statistics);
static void mmc_update_bkops_hpi(struct mmc_bkops_stats *stats)
{
spin_lock_irq(&stats->lock);
if (stats->enabled)
stats->hpi++;
spin_unlock_irq(&stats->lock);
}
static void mmc_update_bkops_start(struct mmc_bkops_stats *stats)
{
spin_lock_irq(&stats->lock);
if (stats->enabled)
stats->manual_start++;
spin_unlock_irq(&stats->lock);
}
static void mmc_update_bkops_auto_on(struct mmc_bkops_stats *stats)
{
spin_lock_irq(&stats->lock);
if (stats->enabled)
stats->auto_start++;
spin_unlock_irq(&stats->lock);
}
static void mmc_update_bkops_auto_off(struct mmc_bkops_stats *stats)
{
spin_lock_irq(&stats->lock);
if (stats->enabled)
stats->auto_stop++;
spin_unlock_irq(&stats->lock);
}
static void mmc_update_bkops_level(struct mmc_bkops_stats *stats,
unsigned level)
{
BUG_ON(level >= MMC_BKOPS_NUM_SEVERITY_LEVELS);
spin_lock_irq(&stats->lock);
if (stats->enabled)
stats->level[level]++;
spin_unlock_irq(&stats->lock);
}
/**
* mmc_set_auto_bkops - set auto BKOPS for supported cards
* @card: MMC card to start BKOPS
* @enable: enable/disable flag
*
* Configure the card to run automatic BKOPS.
*
* Should be called when host is claimed.
*/
int mmc_set_auto_bkops(struct mmc_card *card, bool enable)
{
int ret = 0;
u8 bkops_en;
BUG_ON(!card);
enable = !!enable;
if (unlikely(!mmc_card_support_auto_bkops(card))) {
pr_err("%s: %s: card doesn't support auto bkops\n",
mmc_hostname(card->host), __func__);
return -EPERM;
}
if (enable) {
if (mmc_card_doing_auto_bkops(card))
goto out;
bkops_en = card->ext_csd.bkops_en | EXT_CSD_BKOPS_AUTO_EN;
} else {
if (!mmc_card_doing_auto_bkops(card))
goto out;
bkops_en = card->ext_csd.bkops_en & ~EXT_CSD_BKOPS_AUTO_EN;
}
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
bkops_en, 0);
if (ret) {
pr_err("%s: %s: error in setting auto bkops to %d (%d)\n",
mmc_hostname(card->host), __func__, enable, ret);
} else {
if (enable) {
mmc_card_set_auto_bkops(card);
mmc_update_bkops_auto_on(&card->bkops.stats);
} else {
mmc_card_clr_auto_bkops(card);
mmc_update_bkops_auto_off(&card->bkops.stats);
}
card->ext_csd.bkops_en = bkops_en;
pr_debug("%s: %s: bkops state %x\n",
mmc_hostname(card->host), __func__, bkops_en);
}
out:
return ret;
}
EXPORT_SYMBOL(mmc_set_auto_bkops);
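/*
 * Illustrative usage sketch (not taken from an actual caller in this file):
 * auto BKOPS is toggled with the host claimed, e.g.
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_set_auto_bkops(card, true);
 *	mmc_release_host(card->host);
 */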
/**
* mmc_check_bkops - check BKOPS for supported cards
* @card: MMC card to check BKOPS
*
* Read the BKOPS status in order to determine whether the
* card requires bkops to be started.
*/
void mmc_check_bkops(struct mmc_card *card)
{
int err;
BUG_ON(!card);
if (mmc_card_doing_bkops(card))
return;
err = mmc_read_bkops_status(card);
if (err) {
pr_err("%s: Failed to read bkops status: %d\n",
mmc_hostname(card->host), err);
return;
}
card->bkops.needs_check = false;
mmc_update_bkops_level(&card->bkops.stats,
card->ext_csd.raw_bkops_status);
card->bkops.needs_bkops = card->ext_csd.raw_bkops_status > 0;
}
EXPORT_SYMBOL(mmc_check_bkops);
/**
* mmc_start_manual_bkops - start BKOPS for supported cards
* @card: MMC card to start BKOPS
*
* Send START_BKOPS to the card.
* The function should be called with claimed host.
*/
void mmc_start_manual_bkops(struct mmc_card *card)
{
int err;
BUG_ON(!card);
if (unlikely(!mmc_card_configured_manual_bkops(card)))
return;
if (mmc_card_doing_bkops(card))
return;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START,
1, 0, false, true, false);
if (err) {
pr_err("%s: Error %d starting manual bkops\n",
mmc_hostname(card->host), err);
} else {
mmc_card_set_doing_bkops(card);
mmc_update_bkops_start(&card->bkops.stats);
card->bkops.needs_bkops = false;
}
}
EXPORT_SYMBOL(mmc_start_manual_bkops);
/*
* mmc_wait_data_done() - done callback for data request
* @mrq: done data request
*
 * Wakes up the mmc context; passed as the done callback to the host
 * controller driver.
*/
static void mmc_wait_data_done(struct mmc_request *mrq)
{
unsigned long flags;
struct mmc_context_info *context_info = &mrq->host->context_info;
spin_lock_irqsave(&context_info->lock, flags);
context_info->is_done_rcv = true;
wake_up_interruptible(&context_info->wait);
spin_unlock_irqrestore(&context_info->lock, flags);
}
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
}
/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when the request is completed by the
 * card, and starts the data request execution.
*/
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
mrq->done = mmc_wait_data_done;
mrq->host = host;
if (mmc_card_removed(host->card)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_wait_data_done(mrq);
return -ENOMEDIUM;
}
mmc_start_request(host, mrq);
return 0;
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
if (mmc_card_removed(host->card)) {
mrq->cmd->error = -ENOMEDIUM;
complete(&mrq->completion);
return -ENOMEDIUM;
}
mmc_start_request(host, mrq);
return 0;
}
/*
* mmc_wait_for_data_req_done() - wait for request completed
* @host: MMC host to prepare the command.
* @mrq: MMC request to wait for
*
 * Blocks the MMC context until the host controller acknowledges the end of
 * the data request execution or a new request notification arrives from the
 * block layer. Handles command retries.
*
* Returns enum mmc_blk_status after checking errors.
*/
static int mmc_wait_for_data_req_done(struct mmc_host *host,
struct mmc_request *mrq,
struct mmc_async_req *next_req)
{
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
int err;
bool is_done_rcv = false;
unsigned long flags;
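	/*
	 * Loop until either the current request completes (retrying failed
	 * commands) or the block layer signals a new request, in which case
	 * MMC_BLK_NEW_REQUEST is returned.
	 */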
while (1) {
wait_event_interruptible(context_info->wait,
(context_info->is_done_rcv ||
context_info->is_new_req));
spin_lock_irqsave(&context_info->lock, flags);
is_done_rcv = context_info->is_done_rcv;
context_info->is_waiting_last_req = false;
spin_unlock_irqrestore(&context_info->lock, flags);
if (is_done_rcv) {
context_info->is_done_rcv = false;
context_info->is_new_req = false;
cmd = mrq->cmd;
if (!cmd->error || !cmd->retries ||
mmc_card_removed(host->card)) {
err = host->areq->err_check(host->card,
host->areq);
break; /* return err */
} else {
pr_info("%s: req failed (CMD%u): %d, retrying...\n",
mmc_hostname(host),
cmd->opcode, cmd->error);
cmd->retries--;
cmd->error = 0;
host->ops->request(host, mrq);
continue; /* wait for done/new event again */
}
} else if (context_info->is_new_req) {
context_info->is_new_req = false;
if (!next_req) {
err = MMC_BLK_NEW_REQUEST;
break; /* return err */
}
}
}
return err;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
struct mmc_request *mrq)
{
struct mmc_command *cmd;
while (1) {
wait_for_completion_io(&mrq->completion);
cmd = mrq->cmd;
/*
* If host has timed out waiting for the sanitize/bkops
* to complete, card might be still in programming state
* so let's try to bring the card out of programming
* state.
*/
if ((cmd->bkops_busy || cmd->sanitize_busy) && cmd->error == -ETIMEDOUT) {
if (!mmc_interrupt_hpi(host->card)) {
pr_warn("%s: %s: Interrupted sanitize/bkops\n",
mmc_hostname(host), __func__);
cmd->error = 0;
break;
} else {
pr_err("%s: %s: Failed to interrupt sanitize\n",
mmc_hostname(host), __func__);
}
}
if (!cmd->error || !cmd->retries ||
mmc_card_removed(host->card))
break;
pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
mmc_hostname(host), cmd->opcode, cmd->error);
cmd->retries--;
cmd->error = 0;
host->ops->request(host, mrq);
}
}
/**
* mmc_pre_req - Prepare for a new request
* @host: MMC host to prepare command
* @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *                that may run in parallel to this call, otherwise false
*
* mmc_pre_req() is called in prior to mmc_start_req() to let
* host prepare for the new request. Preparation of a request may be
* performed while another request is running on the host.
*/
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
bool is_first_req)
{
if (host->ops->pre_req) {
mmc_host_clk_hold(host);
host->ops->pre_req(host, mrq, is_first_req);
mmc_host_clk_release(host);
}
}
/**
* mmc_post_req - Post process a completed request
* @host: MMC host to post process command
* @mrq: MMC request to post process for
* @err: Error, if non zero, clean up any resources made in pre_req
*
* Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
*/
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
int err)
{
if (host->ops->post_req) {
mmc_host_clk_hold(host);
host->ops->post_req(host, mrq, err);
mmc_host_clk_release(host);
}
}
/**
* mmc_cmdq_discard_card_queue - discard the task[s] in the device
* @host: host instance
 * @tasks: mask of tasks to be discarded (0: remove all queued tasks)
*/
int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks)
{
return mmc_discard_queue(host, tasks);
}
EXPORT_SYMBOL(mmc_cmdq_discard_queue);
/**
* mmc_cmdq_post_req - post process of a completed request
* @host: host instance
* @tag: the request tag.
 * @err: non-zero on error, zero on success
*/
void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err)
{
if (likely(host->cmdq_ops->post_req))
host->cmdq_ops->post_req(host, tag, err);
}
EXPORT_SYMBOL(mmc_cmdq_post_req);
/**
* mmc_cmdq_halt - halt/un-halt the command queue engine
* @host: host instance
* @halt: true - halt, un-halt otherwise
*
 * Halts or un-halts the command queue engine on the host. When halting,
 * the host should complete the ongoing transfer and release the bus.
* All legacy commands can be sent upon successful
* completion of this function.
* Returns 0 on success, negative otherwise
*/
int mmc_cmdq_halt(struct mmc_host *host, bool halt)
{
int err = 0;
if ((halt && mmc_host_halt(host)) ||
(!halt && !mmc_host_halt(host))) {
pr_debug("%s: %s: CQE is already %s\n", mmc_hostname(host),
__func__, halt ? "halted" : "un-halted");
return 0;
}
mmc_host_clk_hold(host);
if (host->cmdq_ops->halt) {
err = host->cmdq_ops->halt(host, halt);
if (!err && host->ops->notify_halt)
host->ops->notify_halt(host, halt);
if (!err && halt)
mmc_host_set_halt(host);
else if (!err && !halt) {
mmc_host_clr_halt(host);
wake_up(&host->cmdq_ctx.wait);
}
} else {
err = -ENOSYS;
}
mmc_host_clk_release(host);
return err;
}
EXPORT_SYMBOL(mmc_cmdq_halt);
int mmc_cmdq_start_req(struct mmc_host *host, struct mmc_cmdq_req *cmdq_req)
{
struct mmc_request *mrq = &cmdq_req->mrq;
mrq->host = host;
if (mmc_card_removed(host->card)) {
mrq->cmd->error = -ENOMEDIUM;
return -ENOMEDIUM;
}
mmc_start_cmdq_request(host, mrq);
return 0;
}
EXPORT_SYMBOL(mmc_cmdq_start_req);
static void mmc_cmdq_dcmd_req_done(struct mmc_request *mrq)
{
mmc_host_clk_release(mrq->host);
complete(&mrq->completion);
}
int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
struct mmc_cmdq_req *cmdq_req)
{
struct mmc_request *mrq = &cmdq_req->mrq;
struct mmc_command *cmd = mrq->cmd;
int err = 0;
init_completion(&mrq->completion);
mrq->done = mmc_cmdq_dcmd_req_done;
err = mmc_cmdq_start_req(host, cmdq_req);
if (err)
return err;
wait_for_completion_io(&mrq->completion);
if (cmd->error) {
pr_err("%s: DCMD %d failed with err %d\n",
mmc_hostname(host), cmd->opcode,
cmd->error);
err = cmd->error;
mmc_host_clk_hold(host);
host->cmdq_ops->dumpstate(host);
mmc_host_clk_release(host);
}
return err;
}
EXPORT_SYMBOL(mmc_cmdq_wait_for_dcmd);
int mmc_cmdq_prepare_flush(struct mmc_command *cmd)
{
return __mmc_switch_cmdq_mode(cmd, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1,
0, true, true);
}
EXPORT_SYMBOL(mmc_cmdq_prepare_flush);
/**
* mmc_start_req - start a non-blocking request
* @host: MMC host to start command
* @areq: async request to start
 * @error: out parameter; returns 0 for success, otherwise non-zero
*
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request, then start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, or NULL if none has completed.
 * Waits for an ongoing request (previously started) to complete and
 * returns the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
*/
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
struct mmc_async_req *areq, int *error)
{
int err = 0;
int start_err = 0;
struct mmc_async_req *data = host->areq;
/* Prepare a new request */
if (areq)
mmc_pre_req(host, areq->mrq, !host->areq);
if (host->areq) {
err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
if (err == MMC_BLK_NEW_REQUEST) {
if (error)
*error = err;
/*
* The previous request was not completed,
* nothing to return
*/
return NULL;
}
/*
* Check BKOPS urgency for each R1 response
*/
if (host->card && mmc_card_mmc(host->card) &&
((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
(mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
(host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
mmc_check_bkops(host->card);
}
if (!err && areq) {
trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
areq->mrq->cmd->arg,
areq->mrq->data);
start_err = __mmc_start_data_req(host, areq->mrq);
}
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
if (err && areq)
mmc_post_req(host, areq->mrq, -EINVAL);
if (err)
host->areq = NULL;
else
host->areq = areq;
if (error)
*error = err;
return data;
}
EXPORT_SYMBOL(mmc_start_req);
/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
*
* Start a new MMC custom command request for a host, and wait
* for the command to complete. Does not attempt to parse the
* response.
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
__mmc_start_req(host, mrq);
mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt and polls the card status
 * until the card leaves the programming state.
*/
int mmc_interrupt_hpi(struct mmc_card *card)
{
int err;
u32 status;
unsigned long prg_wait;
BUG_ON(!card);
if (!card->ext_csd.hpi_en) {
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
return 1;
}
mmc_claim_host(card->host);
err = mmc_send_status(card, &status);
if (err) {
pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
goto out;
}
switch (R1_CURRENT_STATE(status)) {
case R1_STATE_IDLE:
case R1_STATE_READY:
case R1_STATE_STBY:
case R1_STATE_TRAN:
/*
* In idle and transfer states, HPI is not needed and the caller
* can issue the next intended command immediately
*/
goto out;
case R1_STATE_PRG:
break;
default:
/* In all other states, it's illegal to issue HPI */
pr_debug("%s: HPI cannot be sent. Card state=%d\n",
mmc_hostname(card->host), R1_CURRENT_STATE(status));
err = -EINVAL;
goto out;
}
err = mmc_send_hpi_cmd(card, &status);
prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
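	/*
	 * Poll the card status until it returns to the transfer state or
	 * the OUT_OF_INTERRUPT_TIME window expires.
	 */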
do {
err = mmc_send_status(card, &status);
if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
break;
if (time_after(jiffies, prg_wait)) {
err = mmc_send_status(card, &status);
if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
err = -ETIMEDOUT;
else
break;
}
} while (!err);
out:
mmc_release_host(card->host);
return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
* mmc_wait_for_cmd - start a command and wait for completion
* @host: MMC host to start command
* @cmd: MMC command to start
* @retries: maximum number of retries
*
* Start a new MMC command for a host, and wait for the command
* to complete. Return any error that occurred while the command
* was executing. Do not attempt to parse the response.
*/
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
struct mmc_request mrq = {NULL};
WARN_ON(!host->claimed);
memset(cmd->resp, 0, sizeof(cmd->resp));
cmd->retries = retries;
mrq.cmd = cmd;
cmd->data = NULL;
mmc_wait_for_req(host, &mrq);
return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
* mmc_stop_bkops - stop ongoing BKOPS
* @card: MMC card to check BKOPS
*
* Send HPI command to stop ongoing background operations to
* allow rapid servicing of foreground operations, e.g. read/
* writes. Wait until the card comes out of the programming state
* to avoid errors in servicing read/write requests.
*/
int mmc_stop_bkops(struct mmc_card *card)
{
int err = 0;
BUG_ON(!card);
if (unlikely(!mmc_card_configured_manual_bkops(card)))
goto out;
if (!mmc_card_doing_bkops(card))
goto out;
err = mmc_interrupt_hpi(card);
	/*
	 * If err is -EINVAL, an HPI cannot be issued in the current card
	 * state; the card is then expected to complete the BKOPS on its own.
	 */
if (!err || (err == -EINVAL)) {
mmc_card_clr_doing_bkops(card);
mmc_update_bkops_hpi(&card->bkops.stats);
err = 0;
}
out:
return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
int mmc_read_bkops_status(struct mmc_card *card)
{
int err;
u8 *ext_csd;
/*
* In future work, we should consider storing the entire ext_csd.
*/
ext_csd = kmalloc(512, GFP_KERNEL);
if (!ext_csd) {
pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
mmc_hostname(card->host));
return -ENOMEM;
}
mmc_claim_host(card->host);
err = mmc_send_ext_csd(card, ext_csd);
mmc_release_host(card->host);
if (err)
goto out;
card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS] &
MMC_BKOPS_URGENCY_MASK;
card->ext_csd.raw_exception_status =
ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & (EXT_CSD_URGENT_BKOPS |
EXT_CSD_DYNCAP_NEEDED |
EXT_CSD_SYSPOOL_EXHAUSTED
| EXT_CSD_PACKED_FAILURE);
out:
kfree(ext_csd);
return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
* mmc_set_data_timeout - set the timeout for a data command
* @data: data phase for command
* @card: the MMC card associated with the data transfer
*
* Computes the data timeout parameters according to the
* correct algorithm given the card type.
*/
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
unsigned int mult;
if (!card) {
WARN_ON(1);
return;
}
/*
* SDIO cards only define an upper 1 s limit on access.
*/
if (mmc_card_sdio(card)) {
data->timeout_ns = 1000000000;
data->timeout_clks = 0;
return;
}
/*
* SD cards use a 100 multiplier rather than 10
*/
mult = mmc_card_sd(card) ? 100 : 10;
/*
* Scale up the multiplier (and therefore the timeout) by
* the r2w factor for writes.
*/
if (data->flags & MMC_DATA_WRITE)
mult <<= card->csd.r2w_factor;
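	/*
	 * TAAC provides the time component and NSAC the clock-count
	 * component of the timeout; both are scaled by the multiplier.
	 */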
data->timeout_ns = card->csd.tacc_ns * mult;
data->timeout_clks = card->csd.tacc_clks * mult;
/*
* SD cards also have an upper limit on the timeout.
*/
if (mmc_card_sd(card)) {
unsigned int timeout_us, limit_us;
timeout_us = data->timeout_ns / 1000;
if (mmc_host_clk_rate(card->host))
timeout_us += data->timeout_clks * 1000 /
(mmc_host_clk_rate(card->host) / 1000);
if (data->flags & MMC_DATA_WRITE)
/*
* The MMC spec "It is strongly recommended
* for hosts to implement more than 500ms
* timeout value even if the card indicates
* the 250ms maximum busy length." Even the
* previous value of 300ms is known to be
* insufficient for some cards.
*/
limit_us = 3000000;
else
limit_us = 100000;
/*
* SDHC cards always use these fixed values.
*/
if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
data->timeout_ns = limit_us * 1000;
data->timeout_clks = 0;
}
/* assign limit value if invalid */
if (timeout_us == 0)
data->timeout_ns = limit_us * 1000;
}
/*
* Some cards require longer data read timeout than indicated in CSD.
* Address this by setting the read timeout to a "reasonably high"
* value. For the cards tested, 600ms has proven enough. If necessary,
* this value can be increased if other problematic cards require this.
*/
if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
data->timeout_ns = 600000000;
data->timeout_clks = 0;
}
/*
* Some cards need very high timeouts if driven in SPI mode.
* The worst observed timeout was 900ms after writing a
* continuous stream of data until the internal logic
* overflowed.
*/
if (mmc_host_is_spi(card->host)) {
if (data->flags & MMC_DATA_WRITE) {
if (data->timeout_ns < 1000000000)
data->timeout_ns = 1000000000; /* 1s */
} else {
if (data->timeout_ns < 100000000)
data->timeout_ns = 100000000; /* 100ms */
}
}
/* Increase the timeout values for some bad INAND MCP devices */
if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
data->timeout_ns = 4000000000u; /* 4s */
data->timeout_clks = 0;
}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
/**
* mmc_align_data_size - pads a transfer size to a more optimal value
* @card: the MMC card associated with the data transfer
* @sz: original transfer size
*
* Pads the original data size with a number of extra bytes in
* order to avoid controller bugs and/or performance hits
* (e.g. some controllers revert to PIO for certain sizes).
*
* Returns the improved size, which might be unmodified.
*
* Note that this function is only relevant when issuing a
* single scatter gather entry.
*/
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
/*
* FIXME: We don't have a system for the controller to tell
* the core about its problems yet, so for now we just 32-bit
* align the size.
*/
sz = ((sz + 3) / 4) * 4;
return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
* @abort: whether or not the operation should be aborted
*
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, then this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
*/
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int stop;
might_sleep();
add_wait_queue(&host->wq, &wait);
spin_lock_irqsave(&host->lock, flags);
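	/*
	 * Sleep until the host is unclaimed, already claimed by this task
	 * (nested claim), or the abort flag is raised.
	 */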
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
stop = abort ? atomic_read(abort) : 0;
if (stop || !host->claimed || host->claimer == current)
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
spin_lock_irqsave(&host->lock, flags);
}
set_current_state(TASK_RUNNING);
if (!stop) {
host->claimed = 1;
host->claimer = current;
host->claim_cnt += 1;
} else
wake_up(&host->wq);
spin_unlock_irqrestore(&host->lock, flags);
remove_wait_queue(&host->wq, &wait);
if (host->ops->enable && !stop && host->claim_cnt == 1)
host->ops->enable(host);
return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
/**
* mmc_release_host - release a host
* @host: mmc host to release
*
 * Release an MMC host, allowing others to claim the host
* for their operations.
*/
void mmc_release_host(struct mmc_host *host)
{
unsigned long flags;
WARN_ON(!host->claimed);
if (host->ops->disable && host->claim_cnt == 1)
host->ops->disable(host);
spin_lock_irqsave(&host->lock, flags);
if (--host->claim_cnt) {
/* Release for nested claim */
spin_unlock_irqrestore(&host->lock, flags);
} else {
host->claimed = 0;
host->claimer = NULL;
spin_unlock_irqrestore(&host->lock, flags);
wake_up(&host->wq);
}
}
EXPORT_SYMBOL(mmc_release_host);
/*
* This is a helper function, which fetches a runtime pm reference for the
* card device and also claims the host.
*/
void mmc_get_card(struct mmc_card *card)
{
pm_runtime_get_sync(&card->dev);
mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);
/*
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
*/
void mmc_put_card(struct mmc_card *card)
{
mmc_release_host(card->host);
pm_runtime_mark_last_busy(&card->dev);
pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
/*
* Internal function that does the actual ios call to the host driver,
* optionally printing some debug output.
*/
void mmc_set_ios(struct mmc_host *host)
{
struct mmc_ios *ios = &host->ios;
pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
"width %u timing %u\n",
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
ios->bus_width, ios->timing);
if (ios->clock > 0)
mmc_set_ungated(host);
host->ops->set_ios(host, ios);
if (ios->old_rate != ios->clock) {
if (likely(ios->clk_ts)) {
char trace_info[80];
snprintf(trace_info, 80,
"%s: freq_KHz %d --> %d | t = %d",
mmc_hostname(host), ios->old_rate / 1000,
ios->clock / 1000, jiffies_to_msecs(
(long)jiffies - (long)ios->clk_ts));
trace_mmc_clk(trace_info);
}
ios->old_rate = ios->clock;
ios->clk_ts = jiffies;
}
}
EXPORT_SYMBOL(mmc_set_ios);
/*
* Control chip select pin on a host.
*/
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
mmc_host_clk_hold(host);
host->ios.chip_select = mode;
mmc_set_ios(host);
mmc_host_clk_release(host);
}
/*
* Sets the host clock to the highest possible frequency that
* is below "hz".
*/
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
WARN_ON(hz && hz < host->f_min);
if (hz > host->f_max)
hz = host->f_max;
host->ios.clock = hz;
mmc_set_ios(host);
}
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
mmc_host_clk_hold(host);
__mmc_set_clock(host, hz);
mmc_host_clk_release(host);
}
#ifdef CONFIG_MMC_CLKGATE
/*
* This gates the clock by setting it to 0 Hz.
*/
void mmc_gate_clock(struct mmc_host *host)
{
unsigned long flags;
WARN_ON(!host->ios.clock);
spin_lock_irqsave(&host->clk_lock, flags);
host->clk_old = host->ios.clock;
host->ios.clock = 0;
host->clk_gated = true;
spin_unlock_irqrestore(&host->clk_lock, flags);
mmc_set_ios(host);
}
/*
* This restores the clock from gating by using the cached
* clock value.
*/
void mmc_ungate_clock(struct mmc_host *host)
{
/*
* We should previously have gated the clock, so the clock shall
* be 0 here! The clock may however be 0 during initialization,
* when some request operations are performed before setting
* the frequency. When ungate is requested in that situation
* we just ignore the call.
*/
if (host->clk_old) {
WARN_ON(host->ios.clock);
/* This call will also set host->clk_gated to false */
__mmc_set_clock(host, host->clk_old);
}
}
void mmc_set_ungated(struct mmc_host *host)
{
unsigned long flags;
/*
* We've been given a new frequency while the clock is gated,
* so make sure we regard this as ungating it.
*/
spin_lock_irqsave(&host->clk_lock, flags);
host->clk_gated = false;
spin_unlock_irqrestore(&host->clk_lock, flags);
}
#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif
int mmc_execute_tuning(struct mmc_card *card)
{
struct mmc_host *host = card->host;
u32 opcode;
int err;
if (!host->ops->execute_tuning)
return 0;
if (mmc_card_mmc(card))
opcode = MMC_SEND_TUNING_BLOCK_HS200;
else
opcode = MMC_SEND_TUNING_BLOCK;
mmc_host_clk_hold(host);
err = host->ops->execute_tuning(host, opcode);
mmc_host_clk_release(host);
if (err)
pr_err("%s: tuning execution failed\n", mmc_hostname(host));
return err;
}
/*
* Change the bus mode (open drain/push-pull) of a host.
*/
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
mmc_host_clk_hold(host);
host->ios.bus_mode = mode;
mmc_set_ios(host);
mmc_host_clk_release(host);
}
/*
* Change data bus width of a host.
*/
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
mmc_host_clk_hold(host);
host->ios.bus_width = width;
mmc_set_ios(host);
mmc_host_clk_release(host);
}
/**
* mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
* @vdd: voltage (mV)
* @low_bits: prefer low bits in boundary cases
*
* This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible, a negative errno value is returned.
*
* Depending on the @low_bits flag the function prefers low or high OCR bits
* on boundary voltages. For example,
* with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
* with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
*
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
*/
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
const int max_bit = ilog2(MMC_VDD_35_36);
int bit;
if (vdd < 1650 || vdd > 3600)
return -EINVAL;
if (vdd >= 1650 && vdd <= 1950)
return ilog2(MMC_VDD_165_195);
if (low_bits)
vdd -= 1;
/* Base 2000 mV, step 100 mV, bit's base 8. */
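	/*
	 * For example, 3300 mV with @low_bits set becomes
	 * (3299 - 2000) / 100 + 8 = 20 = ilog2(MMC_VDD_32_33).
	 */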
bit = (vdd - 2000) / 100 + 8;
if (bit > max_bit)
return max_bit;
return bit;
}
/**
* mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
* @vdd_min: minimum voltage value (mV)
* @vdd_max: maximum voltage value (mV)
*
* This function returns the OCR mask bits according to the provided @vdd_min
* and @vdd_max values. If conversion is not possible the function returns 0.
*
* Notes wrt boundary cases:
* This function sets the OCR bits for all boundary voltages, for example
* [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
* MMC_VDD_34_35 mask.
*/
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
u32 mask = 0;
if (vdd_max < vdd_min)
return 0;
/* Prefer high bits for the boundary vdd_max values. */
vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
if (vdd_max < 0)
return 0;
/* Prefer low bits for the boundary vdd_min values. */
vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
if (vdd_min < 0)
return 0;
/* Fill the mask, from max bit to min bit. */
while (vdd_max >= vdd_min)
mask |= 1 << vdd_max--;
return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
#ifdef CONFIG_OF
/**
* mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
* @mask: mask of voltages available for MMC/SD/SDIO
*
 * Returns zero on success, or a negative errno if the voltage-range
 * property is missing or invalid.
*/
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
const u32 *voltage_ranges;
int num_ranges, i;
voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
if (!voltage_ranges || !num_ranges) {
pr_info("%s: voltage-ranges unspecified\n", np->full_name);
return -EINVAL;
}
for (i = 0; i < num_ranges; i++) {
const int j = i * 2;
u32 ocr_mask;
ocr_mask = mmc_vddrange_to_ocrmask(
be32_to_cpu(voltage_ranges[j]),
be32_to_cpu(voltage_ranges[j + 1]));
if (!ocr_mask) {
pr_err("%s: voltage-range #%d is invalid\n",
np->full_name, i);
return -EINVAL;
}
*mask |= ocr_mask;
}
return 0;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
#endif /* CONFIG_OF */
#ifdef CONFIG_REGULATOR
/**
* mmc_regulator_get_ocrmask - return mask of supported voltages
* @supply: regulator to use
*
* This returns either a negative errno, or a mask of voltages that
* can be provided to MMC/SD/SDIO devices using the specified voltage
* regulator. This would normally be called before registering the
* MMC host adapter.
*/
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
int result = 0;
int count;
int i;
int vdd_uV;
int vdd_mV;
count = regulator_count_voltages(supply);
if (count < 0)
return count;
for (i = 0; i < count; i++) {
vdd_uV = regulator_list_voltage(supply, i);
if (vdd_uV <= 0)
continue;
vdd_mV = vdd_uV / 1000;
result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
}
if (!result) {
vdd_uV = regulator_get_voltage(supply);
if (vdd_uV <= 0)
return vdd_uV;
vdd_mV = vdd_uV / 1000;
result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
}
return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
/**
* mmc_regulator_set_ocr - set regulator to match host->ios voltage
* @mmc: the host to regulate
* @supply: regulator to use
* @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
*
* Returns zero on success, else negative errno.
*
* MMC host drivers may use this to enable or disable a regulator using
* a particular supply voltage. This would normally be called from the
* set_ios() method.
*/
int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit)
{
int result = 0;
int min_uV, max_uV;
if (vdd_bit) {
int tmp;
/*
* REVISIT mmc_vddrange_to_ocrmask() may have set some
* bits this regulator doesn't quite support ... don't
* be too picky, most cards and regulators are OK with
* a 0.1V range goof (it's a small error percentage).
*/
tmp = vdd_bit - ilog2(MMC_VDD_165_195);
if (tmp == 0) {
min_uV = 1650 * 1000;
max_uV = 1950 * 1000;
} else {
min_uV = 1900 * 1000 + tmp * 100 * 1000;
max_uV = min_uV + 100 * 1000;
}
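		/*
		 * For example, vdd_bit = ilog2(MMC_VDD_32_33) = 20 gives
		 * tmp = 13, i.e. a 3.2 V - 3.3 V window.
		 */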
result = regulator_set_voltage(supply, min_uV, max_uV);
if (result == 0 && !mmc->regulator_enabled) {
result = regulator_enable(supply);
if (!result)
mmc->regulator_enabled = true;
}
} else if (mmc->regulator_enabled) {
result = regulator_disable(supply);
if (result == 0)
mmc->regulator_enabled = false;
}
if (result)
dev_err(mmc_dev(mmc),
"could not set regulator OCR (%d)\n", result);
return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
#endif /* CONFIG_REGULATOR */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
int ret;
mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
if (IS_ERR(mmc->supply.vmmc)) {
if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(dev, "No vmmc regulator found\n");
} else {
ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
if (ret > 0)
mmc->ocr_avail = ret;
else
dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
}
if (IS_ERR(mmc->supply.vqmmc)) {
if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(dev, "No vqmmc regulator found\n");
}
return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
/*
* Mask off any voltages we don't support and select
* the lowest voltage
*/
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
int bit;
/*
* Sanity check the voltages that the card claims to
* support.
*/
if (ocr & 0x7F) {
dev_warn(mmc_dev(host),
"card claims to support voltages below defined range\n");
ocr &= ~0x7F;
}
ocr &= host->ocr_avail;
if (!ocr) {
dev_warn(mmc_dev(host), "no support for card's volts\n");
return 0;
}
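	/*
	 * With full power-cycle support, restart the card at the lowest
	 * voltage both sides support; otherwise select the highest common
	 * voltage and warn if it differs from the one currently in use.
	 */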
if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
bit = ffs(ocr) - 1;
ocr &= 3 << bit;
mmc_power_cycle(host, ocr);
} else {
bit = fls(ocr) - 1;
ocr &= 3 << bit;
if (bit != host->ios.vdd)
dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
return ocr;
}
int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
int err = 0;
int old_signal_voltage = host->ios.signal_voltage;
host->ios.signal_voltage = signal_voltage;
if (host->ops->start_signal_voltage_switch) {
mmc_host_clk_hold(host);
err = host->ops->start_signal_voltage_switch(host, &host->ios);
mmc_host_clk_release(host);
}
if (err)
host->ios.signal_voltage = old_signal_voltage;
return err;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
struct mmc_command cmd = {0};
int err = 0;
u32 clock;
BUG_ON(!host);
/*
* Send CMD11 only if the request is to switch the card to
* 1.8V signalling.
*/
if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
return __mmc_set_signal_voltage(host, signal_voltage);
/*
* If we cannot switch voltages, return failure so the caller
* can continue without UHS mode
*/
if (!host->ops->start_signal_voltage_switch)
return -EPERM;
if (!host->ops->card_busy)
pr_warn("%s: cannot verify signal voltage switch\n",
mmc_hostname(host));
cmd.opcode = SD_SWITCH_VOLTAGE;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
/*
* Hold the clock reference so clock doesn't get auto gated during this
* voltage switch sequence.
*/
mmc_host_clk_hold(host);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto exit;
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
err = -EIO;
goto exit;
}
/*
* The card should drive cmd and dat[0:3] low immediately
* after the response of cmd11, but wait 1 ms to be sure
*/
mmc_delay(1);
if (host->ops->card_busy && !host->ops->card_busy(host)) {
err = -EAGAIN;
goto power_cycle;
}
/*
* During a signal voltage level switch, the clock must be gated
* for 5 ms according to the SD spec
*/
host->card_clock_off = true;
clock = host->ios.clock;
host->ios.clock = 0;
mmc_set_ios(host);
if (__mmc_set_signal_voltage(host, signal_voltage)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
*/
err = -EAGAIN;
host->ios.clock = clock;
mmc_set_ios(host);
host->card_clock_off = false;
goto power_cycle;
}
/* Keep clock gated for at least 5 ms */
mmc_delay(5);
host->ios.clock = clock;
mmc_set_ios(host);
host->card_clock_off = false;
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
/*
* Failure to switch is indicated by the card holding
* dat[0:3] low
*/
if (host->ops->card_busy && host->ops->card_busy(host))
err = -EAGAIN;
power_cycle:
if (err) {
pr_debug("%s: Signal voltage switch failed, "
"power cycling card\n", mmc_hostname(host));
mmc_power_cycle(host, ocr);
}
exit:
mmc_host_clk_release(host);
return err;
}
/*
* Select timing parameters for host.
*/
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
mmc_host_clk_hold(host);
host->ios.timing = timing;
mmc_set_ios(host);
mmc_host_clk_release(host);
}
/*
* Select appropriate driver type for host.
*/
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
mmc_host_clk_hold(host);
host->ios.drv_type = drv_type;
mmc_set_ios(host);
mmc_host_clk_release(host);
}
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
* We then wait a bit for the power to stabilise. Finally,
* enable the bus drivers and clock to the card.
*
 * We must _NOT_ enable the clock prior to power stabilising.
*
* If a host does all the power sequencing itself, ignore the
* initial MMC_POWER_UP stage.
*/
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
if (host->ios.power_mode == MMC_POWER_ON)
return;
mmc_host_clk_hold(host);
host->ios.vdd = fls(ocr) - 1;
if (mmc_host_is_spi(host))
host->ios.chip_select = MMC_CS_HIGH;
else {
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
}
host->ios.power_mode = MMC_POWER_UP;
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
/*
* This delay should be sufficient to allow the power supply
* to reach the minimum voltage.
*/
mmc_delay(10);
host->ios.clock = host->f_init;
host->ios.power_mode = MMC_POWER_ON;
mmc_set_ios(host);
/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
* time required to reach a stable voltage.
*/
mmc_delay(10);
mmc_host_clk_release(host);
}
void mmc_power_off(struct mmc_host *host)
{
if (host->ios.power_mode == MMC_POWER_OFF)
return;
mmc_host_clk_hold(host);
host->ios.clock = 0;
host->ios.vdd = 0;
if (!mmc_host_is_spi(host)) {
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.chip_select = MMC_CS_DONTCARE;
}
host->ios.power_mode = MMC_POWER_OFF;
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
/*
* Some configurations, such as the 802.11 SDIO card in the OLPC
* XO-1.5, require a short delay after poweroff before the card
* can be successfully turned on again.
*/
mmc_delay(1);
mmc_host_clk_release(host);
}
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
mmc_power_off(host);
/* Wait at least 1 ms according to SD spec */
mmc_delay(1);
mmc_power_up(host, ocr);
}
/*
* Cleanup when the last reference to the bus operator is dropped.
*/
static void __mmc_release_bus(struct mmc_host *host)
{
BUG_ON(!host);
BUG_ON(host->bus_refs);
BUG_ON(!host->bus_dead);
host->bus_ops = NULL;
}
/*
* Increase reference count of bus operator
*/
static inline void mmc_bus_get(struct mmc_host *host)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->bus_refs++;
spin_unlock_irqrestore(&host->lock, flags);
}
/*
* Decrease reference count of bus operator and free it if
* it is the last reference.
*/
static inline void mmc_bus_put(struct mmc_host *host)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->bus_refs--;
if ((host->bus_refs == 0) && host->bus_ops)
__mmc_release_bus(host);
spin_unlock_irqrestore(&host->lock, flags);
}
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
*/
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
unsigned long flags;
BUG_ON(!host);
BUG_ON(!ops);
WARN_ON(!host->claimed);
spin_lock_irqsave(&host->lock, flags);
BUG_ON(host->bus_ops);
BUG_ON(host->bus_refs);
host->bus_ops = ops;
host->bus_refs = 1;
host->bus_dead = 0;
spin_unlock_irqrestore(&host->lock, flags);
}
/*
* Remove the current bus handler from a host.
*/
void mmc_detach_bus(struct mmc_host *host)
{
unsigned long flags;
BUG_ON(!host);
WARN_ON(!host->claimed);
WARN_ON(!host->bus_ops);
spin_lock_irqsave(&host->lock, flags);
host->bus_dead = 1;
spin_unlock_irqrestore(&host->lock, flags);
mmc_bus_put(host);
}
static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
WARN_ON(host->removed);
spin_unlock_irqrestore(&host->lock, flags);
#endif
/*
* If the device is configured as wakeup, we prevent a new sleep for
* 5 s to give provision for user space to consume the event.
*/
if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
device_can_wakeup(mmc_dev(host)))
pm_wakeup_event(mmc_dev(host), 5000);
host->detect_change = 1;
mmc_schedule_delayed_work(&host->detect, delay);
}
/**
* mmc_detect_change - process change of state on a MMC socket
* @host: host which changed state.
* @delay: optional delay to wait before detection (jiffies)
*
* MMC drivers should call this when they detect a card has been
* inserted or removed. The MMC layer will confirm that any
* present card is still functional, and initialize any newly
* inserted.
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
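/*
 * Illustrative sketch (not part of this file): a host controller driver
 * would typically call mmc_detect_change() from its card-detect interrupt
 * handler with a small debounce delay.  The names my_host_irq and my_host
 * are hypothetical.
 *
 *	static irqreturn_t my_host_irq(int irq, void *dev_id)
 *	{
 *		struct my_host *host = dev_id;
 *
 *		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */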
void mmc_init_erase(struct mmc_card *card)
{
unsigned int sz;
if (is_power_of_2(card->erase_size))
card->erase_shift = ffs(card->erase_size) - 1;
else
card->erase_shift = 0;
/*
* It is possible to erase an arbitrarily large area of an SD or MMC
* card. That is not desirable because it can take a long time
* (minutes) potentially delaying more important I/O, and also the
 * timeout calculations become increasingly and hugely over-estimated.
* Consequently, 'pref_erase' is defined as a guide to limit erases
* to that size and alignment.
*
* For SD cards that define Allocation Unit size, limit erases to one
* Allocation Unit at a time. For MMC cards that define High Capacity
* Erase Size, whether it is switched on or not, limit to that size.
* Otherwise just have a stab at a good value. For modern cards it
* will end up being 4MiB. Note that if the value is too small, it
* can end up taking longer to erase.
*/
if (mmc_card_sd(card) && card->ssr.au) {
card->pref_erase = card->ssr.au;
card->erase_shift = ffs(card->ssr.au) - 1;
} else if (card->ext_csd.hc_erase_size) {
card->pref_erase = card->ext_csd.hc_erase_size;
} else if (card->erase_size) {
sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
if (sz < 128)
card->pref_erase = 512 * 1024 / 512;
else if (sz < 512)
card->pref_erase = 1024 * 1024 / 512;
else if (sz < 1024)
card->pref_erase = 2 * 1024 * 1024 / 512;
else
card->pref_erase = 4 * 1024 * 1024 / 512;
if (card->pref_erase < card->erase_size)
card->pref_erase = card->erase_size;
else {
sz = card->pref_erase % card->erase_size;
if (sz)
card->pref_erase += card->erase_size - sz;
}
} else
card->pref_erase = 0;
}
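/*
 * Worked example (illustrative only): for an 8 GiB MMC card with no SD AU
 * and no high-capacity erase size, sz above is roughly the card size in MiB
 * (capacity in 512-byte sectors divided by 2048), i.e. sz = 8192.  Since
 * sz >= 1024, pref_erase starts at 4 MiB = 8192 sectors and is then rounded
 * up to the next multiple of erase_size if it is not already one.
 */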
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
unsigned int arg, unsigned int qty)
{
unsigned int erase_timeout;
if (arg == MMC_DISCARD_ARG ||
(arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
erase_timeout = card->ext_csd.trim_timeout;
} else if (card->ext_csd.erase_group_def & 1) {
/* High Capacity Erase Group Size uses HC timeouts */
if (arg == MMC_TRIM_ARG)
erase_timeout = card->ext_csd.trim_timeout;
else
erase_timeout = card->ext_csd.hc_erase_timeout;
} else {
/* CSD Erase Group Size uses write timeout */
unsigned int mult = (10 << card->csd.r2w_factor);
unsigned int timeout_clks = card->csd.tacc_clks * mult;
unsigned int timeout_us;
/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
if (card->csd.tacc_ns < 1000000)
timeout_us = (card->csd.tacc_ns * mult) / 1000;
else
timeout_us = (card->csd.tacc_ns / 1000) * mult;
/*
* ios.clock is only a target. The real clock rate might be
* less but not that much less, so fudge it by multiplying by 2.
*/
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
(mmc_host_clk_rate(card->host) / 1000);
erase_timeout = timeout_us / 1000;
/*
* Theoretically, the calculation could underflow so round up
* to 1ms in that case.
*/
if (!erase_timeout)
erase_timeout = 1;
}
/* Multiplier for secure operations */
if (arg & MMC_SECURE_ARGS) {
if (arg == MMC_SECURE_ERASE_ARG)
erase_timeout *= card->ext_csd.sec_erase_mult;
else
erase_timeout *= card->ext_csd.sec_trim_mult;
}
erase_timeout *= qty;
/*
* Ensure at least a 1 second timeout for SPI as per
* 'mmc_set_data_timeout()'
*/
if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
erase_timeout = 1000;
return erase_timeout;
}
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
unsigned int arg,
unsigned int qty)
{
unsigned int erase_timeout;
if (card->ssr.erase_timeout) {
/* Erase timeout specified in SD Status Register (SSR) */
erase_timeout = card->ssr.erase_timeout * qty +
card->ssr.erase_offset;
} else {
/*
* Erase timeout not specified in SD Status Register (SSR) so
* use 250ms per write block.
*/
erase_timeout = 250 * qty;
}
/* Must not be less than 1 second */
if (erase_timeout < 1000)
erase_timeout = 1000;
return erase_timeout;
}
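/*
 * Worked example (illustrative only): if the SSR reports an erase_timeout
 * of 250 ms per allocation unit and an erase_offset of 50 ms, erasing
 * qty = 4 allocation units gives 250 * 4 + 50 = 1050 ms.  A card that
 * reports no SSR erase timeout with qty = 2 gets 250 * 2 = 500 ms, which
 * is then raised to the 1000 ms minimum.
 */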
static unsigned int mmc_erase_timeout(struct mmc_card *card,
unsigned int arg,
unsigned int qty)
{
if (mmc_card_sd(card))
return mmc_sd_erase_timeout(card, arg, qty);
else
return mmc_mmc_erase_timeout(card, arg, qty);
}
static u32 mmc_get_erase_qty(struct mmc_card *card, u32 from, u32 to)
{
u32 qty = 0;
/*
* qty is used to calculate the erase timeout which depends on how many
* erase groups (or allocation units in SD terminology) are affected.
* We count erasing part of an erase group as one erase group.
* For SD, the allocation units are always a power of 2. For MMC, the
 * erase group size is almost certainly also a power of 2, but the JEDEC
 * standard does not seem to insist on that, so we fall back to
* division in that case. SD may not specify an allocation unit size,
* in which case the timeout is based on the number of write blocks.
*
* Note that the timeout for secure trim 2 will only be correct if the
* number of erase groups specified is the same as the total of all
* preceding secure trim 1 commands. Since the power may have been
* lost since the secure trim 1 commands occurred, it is generally
* impossible to calculate the secure trim 2 timeout correctly.
*/
if (card->erase_shift)
qty += ((to >> card->erase_shift) -
(from >> card->erase_shift)) + 1;
else if (mmc_card_sd(card))
qty += to - from + 1;
else
qty += ((to / card->erase_size) -
(from / card->erase_size)) + 1;
return qty;
}
static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
struct mmc_card *card, u32 opcode, u32 arg, u32 qty)
{
struct mmc_command *cmd = cmdq_req->mrq.cmd;
int err;
memset(cmd, 0, sizeof(struct mmc_command));
cmd->opcode = opcode;
cmd->arg = arg;
if (cmd->opcode == MMC_ERASE) {
cmd->flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
cmd->busy_timeout = mmc_erase_timeout(card, arg, qty);
} else {
cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
}
err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
if (err) {
pr_err("mmc_erase: group start error %d, status %#x\n",
err, cmd->resp[0]);
return -EIO;
}
return 0;
}
static int mmc_cmdq_do_erase(struct mmc_cmdq_req *cmdq_req,
struct mmc_card *card, unsigned int from,
unsigned int to, unsigned int arg)
{
struct mmc_command *cmd = cmdq_req->mrq.cmd;
unsigned int qty = 0;
unsigned long timeout;
unsigned int fr, nr;
int err;
fr = from;
nr = to - from + 1;
trace_mmc_blk_erase_start(arg, fr, nr);
qty = mmc_get_erase_qty(card, from, to);
if (!mmc_card_blockaddr(card)) {
from <<= 9;
to <<= 9;
}
err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_START,
from, qty);
if (err)
goto out;
err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_END,
to, qty);
if (err)
goto out;
err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE,
arg, qty);
if (err)
goto out;
timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
do {
memset(cmd, 0, sizeof(struct mmc_command));
cmd->opcode = MMC_SEND_STATUS;
cmd->arg = card->rca << 16;
cmd->flags = MMC_RSP_R1 | MMC_CMD_AC;
/* Do not retry else we can't see errors */
err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
if (err || (cmd->resp[0] & 0xFDF92000)) {
pr_err("error %d requesting status %#x\n",
err, cmd->resp[0]);
err = -EIO;
goto out;
}
/* Timeout if the device never becomes ready for data and
* never leaves the program state.
*/
if (time_after(jiffies, timeout)) {
pr_err("%s: Card stuck in programming state! %s\n",
mmc_hostname(card->host), __func__);
err = -EIO;
goto out;
}
} while (!(cmd->resp[0] & R1_READY_FOR_DATA) ||
(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG));
out:
trace_mmc_blk_erase_end(arg, fr, nr);
return err;
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
unsigned int to, unsigned int arg)
{
struct mmc_command cmd = {0};
unsigned int qty = 0;
unsigned long timeout;
unsigned int fr, nr;
int err;
fr = from;
nr = to - from + 1;
trace_mmc_blk_erase_start(arg, fr, nr);
qty = mmc_get_erase_qty(card, from, to);
if (!mmc_card_blockaddr(card)) {
from <<= 9;
to <<= 9;
}
if (mmc_card_sd(card))
cmd.opcode = SD_ERASE_WR_BLK_START;
else
cmd.opcode = MMC_ERASE_GROUP_START;
cmd.arg = from;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: group start error %d, "
"status %#x\n", err, cmd.resp[0]);
err = -EIO;
goto out;
}
memset(&cmd, 0, sizeof(struct mmc_command));
if (mmc_card_sd(card))
cmd.opcode = SD_ERASE_WR_BLK_END;
else
cmd.opcode = MMC_ERASE_GROUP_END;
cmd.arg = to;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: group end error %d, status %#x\n",
err, cmd.resp[0]);
err = -EIO;
goto out;
}
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_ERASE;
cmd.arg = arg;
cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: erase error %d, status %#x\n",
err, cmd.resp[0]);
err = -EIO;
goto out;
}
if (mmc_host_is_spi(card->host))
goto out;
timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
do {
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_SEND_STATUS;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
/* Do not retry else we can't see errors */
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err || (cmd.resp[0] & 0xFDF92000)) {
pr_err("error %d requesting status %#x\n",
err, cmd.resp[0]);
err = -EIO;
goto out;
}
/* Timeout if the device never becomes ready for data and
* never leaves the program state.
*/
if (time_after(jiffies, timeout)) {
pr_err("%s: Card stuck in programming state! %s\n",
mmc_hostname(card->host), __func__);
err = -EIO;
goto out;
}
} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
(R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
trace_mmc_blk_erase_end(arg, fr, nr);
return err;
}
int mmc_erase_sanity_check(struct mmc_card *card, unsigned int from,
unsigned int nr, unsigned int arg)
{
if (!(card->host->caps & MMC_CAP_ERASE) ||
!(card->csd.cmdclass & CCC_ERASE))
return -EOPNOTSUPP;
if (!card->erase_size)
return -EOPNOTSUPP;
if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
return -EOPNOTSUPP;
if ((arg & MMC_SECURE_ARGS) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
return -EOPNOTSUPP;
if ((arg & MMC_TRIM_ARGS) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
return -EOPNOTSUPP;
if (arg == MMC_SECURE_ERASE_ARG) {
if (from % card->erase_size || nr % card->erase_size)
return -EINVAL;
}
return 0;
}
int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg)
{
unsigned int rem, to = from + nr;
int ret;
ret = mmc_erase_sanity_check(card, from, nr, arg);
if (ret)
return ret;
if (arg == MMC_ERASE_ARG) {
rem = from % card->erase_size;
if (rem) {
rem = card->erase_size - rem;
from += rem;
if (nr > rem)
nr -= rem;
else
return 0;
}
rem = nr % card->erase_size;
if (rem)
nr -= rem;
}
if (nr == 0)
return 0;
to = from + nr;
if (to <= from)
return -EINVAL;
/* 'from' and 'to' are inclusive */
to -= 1;
return mmc_cmdq_do_erase(cmdq_req, card, from, to, arg);
}
EXPORT_SYMBOL(mmc_cmdq_erase);
/**
* mmc_erase - erase sectors.
* @card: card to erase
* @from: first sector to erase
* @nr: number of sectors to erase
* @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
*
* Caller must claim host before calling this function.
*/
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg)
{
unsigned int rem, to = from + nr;
int ret;
ret = mmc_erase_sanity_check(card, from, nr, arg);
if (ret)
return ret;
if (arg == MMC_ERASE_ARG) {
rem = from % card->erase_size;
if (rem) {
rem = card->erase_size - rem;
from += rem;
if (nr > rem)
nr -= rem;
else
return 0;
}
rem = nr % card->erase_size;
if (rem)
nr -= rem;
}
if (nr == 0)
return 0;
to = from + nr;
if (to <= from)
return -EINVAL;
/* 'from' and 'to' are inclusive */
to -= 1;
return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
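/*
 * Illustrative sketch (not part of this file): a caller that already holds
 * the host claim might discard a 4 MiB region (8192 sectors of 512 bytes)
 * starting at a hypothetical start_sector, preferring a trim when the card
 * supports it:
 *
 *	if (mmc_can_trim(card))
 *		err = mmc_erase(card, start_sector, 8192, MMC_TRIM_ARG);
 *	else if (mmc_can_erase(card))
 *		err = mmc_erase(card, start_sector, 8192, MMC_ERASE_ARG);
 */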
int mmc_can_erase(struct mmc_card *card)
{
if ((card->host->caps & MMC_CAP_ERASE) &&
(card->csd.cmdclass & CCC_ERASE) && card->erase_size)
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_erase);
int mmc_can_trim(struct mmc_card *card)
{
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
int mmc_can_discard(struct mmc_card *card)
{
/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
*/
if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_discard);
int mmc_can_sanitize(struct mmc_card *card)
{
if (!mmc_can_trim(card) && !mmc_can_erase(card))
return 0;
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);
int mmc_can_secure_erase_trim(struct mmc_card *card)
{
if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);
int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
unsigned int nr)
{
if (!card->erase_size)
return 0;
if (from % card->erase_size || nr % card->erase_size)
return 0;
return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
unsigned int arg)
{
struct mmc_host *host = card->host;
unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
unsigned int last_timeout = 0;
if (card->erase_shift)
max_qty = UINT_MAX >> card->erase_shift;
else if (mmc_card_sd(card))
max_qty = UINT_MAX;
else
max_qty = UINT_MAX / card->erase_size;
/* Find the largest qty with an OK timeout */
do {
y = 0;
for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
timeout = mmc_erase_timeout(card, arg, qty + x);
if (timeout > host->max_busy_timeout)
break;
if (timeout < last_timeout)
break;
last_timeout = timeout;
y = x;
}
qty += y;
} while (y);
if (!qty)
return 0;
if (qty == 1)
return 1;
/* Convert qty to sectors */
if (card->erase_shift)
max_discard = --qty << card->erase_shift;
else if (mmc_card_sd(card))
max_discard = qty;
else
max_discard = --qty * card->erase_size;
return max_discard;
}
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
struct mmc_host *host = card->host;
unsigned int max_discard, max_trim;
if (!host->max_busy_timeout ||
(host->caps2 & MMC_CAP2_MAX_DISCARD_SIZE))
return UINT_MAX;
/*
* Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change. In that case, the best choice is
* just the preferred erase size.
*/
if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
return card->pref_erase;
max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
if (mmc_can_trim(card)) {
max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
if (max_trim < max_discard)
max_discard = max_trim;
} else if (max_discard < card->erase_size) {
max_discard = 0;
}
pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
mmc_hostname(host), max_discard, host->max_busy_timeout);
return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
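/*
 * Illustrative sketch (not part of this file): a block-layer consumer such
 * as the mmc block driver can use the result to bound discard requests on
 * its request queue (the queue pointer name is hypothetical):
 *
 *	blk_queue_max_discard_sectors(queue, mmc_calc_max_discard(card));
 */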
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
struct mmc_command cmd = {0};
if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
return 0;
cmd.opcode = MMC_SET_BLOCKLEN;
cmd.arg = blocklen;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write)
{
struct mmc_command cmd = {0};
cmd.opcode = MMC_SET_BLOCK_COUNT;
cmd.arg = blockcount & 0x0000FFFF;
if (is_rel_write)
cmd.arg |= 1 << 31;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
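/*
 * Illustrative sketch (not part of this file): before a pre-defined
 * multiple-block transfer of nr_blocks 512-byte blocks, a caller could
 * program the card like this (nr_blocks is hypothetical):
 *
 *	err = mmc_set_blocklen(card, 512);
 *	if (!err)
 *		err = mmc_set_blockcount(card, nr_blocks, false);
 */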
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
return;
mmc_host_clk_hold(host);
host->ops->hw_reset(host);
mmc_host_clk_release(host);
}
int mmc_can_reset(struct mmc_card *card)
{
u8 rst_n_function;
if (mmc_card_sdio(card))
return 0;
if (mmc_card_mmc(card) && (card->host->caps & MMC_CAP_HW_RESET)) {
rst_n_function = card->ext_csd.rst_n_function;
if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) !=
EXT_CSD_RST_N_ENABLED)
return 0;
}
return 1;
}
EXPORT_SYMBOL(mmc_can_reset);
static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
struct mmc_card *card = host->card;
int ret;
if (!host->bus_ops->power_restore)
return -EOPNOTSUPP;
if (!card)
return -EINVAL;
if (!mmc_can_reset(card))
return -EOPNOTSUPP;
mmc_host_clk_hold(host);
mmc_set_clock(host, host->f_init);
if (mmc_card_mmc(card) && host->ops->hw_reset)
host->ops->hw_reset(host);
else
mmc_power_cycle(host, host->ocr_avail);
/* If the reset has happened, then a status command will fail */
if (check) {
struct mmc_command cmd = {0};
int err;
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (!err) {
mmc_host_clk_release(host);
return -ENOSYS;
}
}
if (mmc_host_is_spi(host)) {
host->ios.chip_select = MMC_CS_HIGH;
host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
} else {
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
}
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
mmc_host_clk_release(host);
mmc_claim_host(host);
ret = host->bus_ops->power_restore(host);
mmc_release_host(host);
return ret;
}
/*
* mmc_cmdq_hw_reset: Helper API for doing
* reset_all of host and reinitializing card.
* This must be called with mmc_claim_host
* acquired by the caller.
*/
int mmc_cmdq_hw_reset(struct mmc_host *host)
{
if (!host->bus_ops->power_restore)
return -EOPNOTSUPP;
mmc_power_cycle(host, host->ocr_avail);
mmc_select_voltage(host, host->card->ocr);
return host->bus_ops->power_restore(host);
}
EXPORT_SYMBOL(mmc_cmdq_hw_reset);
int mmc_hw_reset(struct mmc_host *host)
{
return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);
int mmc_hw_reset_check(struct mmc_host *host)
{
return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
host->f_init = freq;
#ifdef CONFIG_MMC_DEBUG
pr_info("%s: %s: trying to init card at %u Hz\n",
mmc_hostname(host), __func__, host->f_init);
#endif
mmc_power_up(host, host->ocr_avail);
/*
* Some eMMCs (with VCCQ always on) may not be reset after power up, so
* do a hardware reset if possible.
*/
mmc_hw_reset_for_init(host);
/*
* sdio_reset sends CMD52 to reset card. Since we do not know
* if the card is being re-initialized, just send it. CMD52
* should be ignored by SD/eMMC cards.
*/
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
/* Order's important: probe SDIO, then SD, then MMC */
if (!mmc_attach_sdio(host))
return 0;
if (!mmc_attach_sd(host))
return 0;
if (!mmc_attach_mmc(host))
return 0;
mmc_power_off(host);
return -EIO;
}
int _mmc_detect_card_removed(struct mmc_host *host)
{
int ret;
if (host->caps & MMC_CAP_NONREMOVABLE)
return 0;
if (!host->card || mmc_card_removed(host->card))
return 1;
ret = host->bus_ops->alive(host);
/*
* Card detect status and alive check may be out of sync if card is
* removed slowly, when card detect switch changes while card/slot
* pads are still contacted in hardware (refer to "SD Card Mechanical
* Addendum, Appendix C: Card Detection Switch"). So reschedule a
* detect work 200ms later for this case.
*/
if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
mmc_detect_change(host, msecs_to_jiffies(200));
pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
}
if (ret) {
mmc_card_set_removed(host->card);
pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
return ret;
}
int mmc_detect_card_removed(struct mmc_host *host)
{
struct mmc_card *card = host->card;
int ret;
WARN_ON(!host->claimed);
if (!card)
return 1;
ret = mmc_card_removed(card);
/*
* The card will be considered unchanged unless we have been asked to
* detect a change or host requires polling to provide card detection.
*/
if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
return ret;
host->detect_change = 0;
if (!ret) {
ret = _mmc_detect_card_removed(host);
if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
/*
* Schedule a detect work as soon as possible to let a
* rescan handle the card removal.
*/
cancel_delayed_work(&host->detect);
_mmc_detect_change(host, 0, false);
}
}
return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
if (host->trigger_card_event && host->ops->card_event) {
host->ops->card_event(host);
host->trigger_card_event = false;
}
if (host->rescan_disable)
return;
/* If there is a non-removable card registered, only scan once */
if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
return;
host->rescan_entered = 1;
mmc_bus_get(host);
/*
* if there is a _removable_ card registered, check whether it is
* still present
*/
if (host->bus_ops && !host->bus_dead
&& !(host->caps & MMC_CAP_NONREMOVABLE))
host->bus_ops->detect(host);
host->detect_change = 0;
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
* the card is no longer present.
*/
mmc_bus_put(host);
mmc_bus_get(host);
/* if there still is a card present, stop here */
if (host->bus_ops != NULL) {
mmc_bus_put(host);
goto out;
}
/*
* Only we can add a new handler, so it's safe to
* release the lock here.
*/
mmc_bus_put(host);
if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
host->ops->get_cd(host) == 0) {
mmc_claim_host(host);
mmc_power_off(host);
mmc_release_host(host);
goto out;
}
mmc_claim_host(host);
(void) mmc_rescan_try_freq(host, host->f_min);
mmc_release_host(host);
out:
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
void mmc_start_host(struct mmc_host *host)
{
mmc_claim_host(host);
host->f_init = max(freqs[0], host->f_min);
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
mmc_power_off(host);
else
mmc_power_up(host, host->ocr_avail);
mmc_gpiod_request_cd_irq(host);
mmc_release_host(host);
_mmc_detect_change(host, 0, false);
}
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->removed = 1;
spin_unlock_irqrestore(&host->lock, flags);
#endif
if (host->slot.cd_irq >= 0)
disable_irq(host->slot.cd_irq);
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
host->pm_flags = 0;
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
mmc_bus_put(host);
return;
}
mmc_bus_put(host);
BUG_ON(host->card);
mmc_power_off(host);
}
int mmc_power_save_host(struct mmc_host *host)
{
int ret = 0;
#ifdef CONFIG_MMC_DEBUG
pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead) {
mmc_bus_put(host);
return -EINVAL;
}
if (host->bus_ops->power_save)
ret = host->bus_ops->power_save(host);
mmc_bus_put(host);
mmc_power_off(host);
return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
int mmc_power_restore_host(struct mmc_host *host)
{
int ret;
#ifdef CONFIG_MMC_DEBUG
pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead) {
mmc_bus_put(host);
return -EINVAL;
}
mmc_power_up(host, host->card->ocr);
mmc_claim_host(host);
ret = host->bus_ops->power_restore(host);
mmc_release_host(host);
mmc_bus_put(host);
return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
/*
* Add barrier request to the requests in cache
*/
int mmc_cache_barrier(struct mmc_card *card)
{
struct mmc_host *host = card->host;
int err = 0;
if (!card->ext_csd.cache_ctrl ||
(card->quirks & MMC_QUIRK_CACHE_DISABLE))
goto out;
if (!mmc_card_mmc(card))
goto out;
if (!card->ext_csd.barrier_en)
return -ENOTSUPP;
/*
	 * If a device receives the maximum number of supported barrier
	 * requests, a barrier command is treated as a
	 * flush command. Hence, it is better to use the
	 * flush timeout instead of a generic CMD6 timeout
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 0x2, 0);
if (err)
pr_err("%s: cache barrier error %d\n",
mmc_hostname(host), err);
out:
return err;
}
EXPORT_SYMBOL(mmc_cache_barrier);
/*
* Flush the cache to the non-volatile storage.
*/
int mmc_flush_cache(struct mmc_card *card)
{
int err = 0;
if (mmc_card_mmc(card) &&
(card->ext_csd.cache_size > 0) &&
(card->ext_csd.cache_ctrl & 1) &&
(!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1, 0);
if (err == -ETIMEDOUT) {
pr_err("%s: cache flush timeout\n",
mmc_hostname(card->host));
err = mmc_interrupt_hpi(card);
if (err) {
pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
mmc_hostname(card->host), err);
err = -ENODEV;
}
} else if (err) {
pr_err("%s: cache flush error %d\n",
mmc_hostname(card->host), err);
}
}
return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
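/*
 * Illustrative sketch (not part of this file): a request handler that has
 * already claimed the host might honour an incoming flush request roughly
 * like this (error handling is minimal):
 *
 *	int ret = mmc_flush_cache(card);
 *	if (ret)
 *		pr_err("%s: cache flush failed: %d\n",
 *		       mmc_hostname(card->host), ret);
 */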
#ifdef CONFIG_PM
/* Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen, so we will
 * be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
{
struct mmc_host *host = container_of(
notify_block, struct mmc_host, pm_notify);
unsigned long flags;
int err = 0;
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
cancel_delayed_work_sync(&host->detect);
if (!host->bus_ops)
break;
/* Validate prerequisites for suspend */
if (host->bus_ops->pre_suspend)
err = host->bus_ops->pre_suspend(host);
if (!err)
break;
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
host->pm_flags = 0;
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
_mmc_detect_change(host, 0, false);
}
return 0;
}
#endif
/**
* mmc_init_context_info() - init synchronization context
* @host: mmc host
*
 * Init the struct context_info needed to implement the asynchronous
 * request mechanism, used by the mmc core, the host driver and the mmc
 * request supplier.
*/
void mmc_init_context_info(struct mmc_host *host)
{
spin_lock_init(&host->context_info.lock);
host->context_info.is_new_req = false;
host->context_info.is_done_rcv = false;
host->context_info.is_waiting_last_req = false;
init_waitqueue_head(&host->context_info.wait);
}
#ifdef CONFIG_MMC_EMBEDDED_SDIO
void mmc_set_embedded_sdio_data(struct mmc_host *host,
struct sdio_cis *cis,
struct sdio_cccr *cccr,
struct sdio_embedded_func *funcs,
int num_funcs)
{
host->embedded_sdio_data.cis = cis;
host->embedded_sdio_data.cccr = cccr;
host->embedded_sdio_data.funcs = funcs;
host->embedded_sdio_data.num_funcs = num_funcs;
}
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
static int __init mmc_init(void)
{
int ret;
workqueue = alloc_ordered_workqueue("kmmcd", 0);
if (!workqueue)
return -ENOMEM;
ret = mmc_register_bus();
if (ret)
goto destroy_workqueue;
ret = mmc_register_host_class();
if (ret)
goto unregister_bus;
ret = sdio_register_bus();
if (ret)
goto unregister_host_class;
return 0;
unregister_host_class:
mmc_unregister_host_class();
unregister_bus:
mmc_unregister_bus();
destroy_workqueue:
destroy_workqueue(workqueue);
return ret;
}
static void __exit mmc_exit(void)
{
sdio_unregister_bus();
mmc_unregister_host_class();
mmc_unregister_bus();
destroy_workqueue(workqueue);
}
subsys_initcall(mmc_init);
module_exit(mmc_exit);
MODULE_LICENSE("GPL");
| fedosis/android_kernel_xiaomi_msm8937 | drivers/mmc/core/core.c | C | gpl-2.0 | 102,367 |
/*
* Copyright 2009-2012 Freescale Semiconductor, Inc. All Rights Reserved.
*/
/*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
/*!
* @file ipu_csi_enc.c
*
* @brief CSI Use case for video capture
*
* @ingroup IPU
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/ipu.h>
#include <mach/mipi_csi2.h>
#include "mxc_v4l2_capture.h"
#include "ipu_prp_sw.h"
#ifdef CAMERA_DBG
#define CAMERA_TRACE(x) (printk)x
#else
#define CAMERA_TRACE(x)
#endif
/*
* Function definitions
*/
/*!
* csi ENC callback function.
*
* @param irq int irq line
* @param dev_id void * device id
*
* @return status IRQ_HANDLED for handled
*/
static irqreturn_t csi_enc_callback(int irq, void *dev_id)
{
cam_data *cam = (cam_data *) dev_id;
if (cam->enc_callback == NULL)
return IRQ_HANDLED;
cam->enc_callback(irq, dev_id);
return IRQ_HANDLED;
}
/*!
* CSI ENC enable channel setup function
*
* @param cam struct cam_data * mxc capture instance
*
* @return status
*/
static int csi_enc_setup(cam_data *cam)
{
ipu_channel_params_t params;
u32 pixel_fmt;
int err = 0, sensor_protocol = 0;
dma_addr_t dummy = cam->dummy_frame.buffer.m.offset;
#ifdef CONFIG_MXC_MIPI_CSI2
void *mipi_csi2_info;
int ipu_id;
int csi_id;
#endif
CAMERA_TRACE("In csi_enc_setup\n");
if (!cam) {
printk(KERN_ERR "cam private is NULL\n");
return -ENXIO;
}
memset(¶ms, 0, sizeof(ipu_channel_params_t));
params.csi_mem.csi = cam->csi;
sensor_protocol = ipu_csi_get_sensor_protocol(cam->ipu, cam->csi);
switch (sensor_protocol) {
case IPU_CSI_CLK_MODE_GATED_CLK:
case IPU_CSI_CLK_MODE_NONGATED_CLK:
case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
params.csi_mem.interlaced = false;
break;
case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
params.csi_mem.interlaced = true;
break;
default:
printk(KERN_ERR "sensor protocol unsupported\n");
return -EINVAL;
}
if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
pixel_fmt = IPU_PIX_FMT_YUV420P;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P)
pixel_fmt = IPU_PIX_FMT_YUV422P;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
pixel_fmt = IPU_PIX_FMT_UYVY;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
pixel_fmt = IPU_PIX_FMT_YUYV;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12)
pixel_fmt = IPU_PIX_FMT_NV12;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24)
pixel_fmt = IPU_PIX_FMT_BGR24;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
pixel_fmt = IPU_PIX_FMT_RGB24;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565)
pixel_fmt = IPU_PIX_FMT_RGB565;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32)
pixel_fmt = IPU_PIX_FMT_BGR32;
else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32)
pixel_fmt = IPU_PIX_FMT_RGB32;
else {
printk(KERN_ERR "format not supported\n");
return -EINVAL;
}
#ifdef CONFIG_MXC_MIPI_CSI2
mipi_csi2_info = mipi_csi2_get_info();
if (mipi_csi2_info) {
if (mipi_csi2_get_status(mipi_csi2_info)) {
ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
if (cam->ipu == ipu_get_soc(ipu_id)
&& cam->csi == csi_id) {
params.csi_mem.mipi_en = true;
params.csi_mem.mipi_vc =
mipi_csi2_get_virtual_channel(mipi_csi2_info);
params.csi_mem.mipi_id =
mipi_csi2_get_datatype(mipi_csi2_info);
mipi_csi2_pixelclk_enable(mipi_csi2_info);
} else {
params.csi_mem.mipi_en = false;
params.csi_mem.mipi_vc = 0;
params.csi_mem.mipi_id = 0;
}
} else {
params.csi_mem.mipi_en = false;
params.csi_mem.mipi_vc = 0;
params.csi_mem.mipi_id = 0;
}
} else {
printk(KERN_ERR "Fail to get mipi_csi2_info!\n");
return -EPERM;
}
#endif
err = ipu_init_channel(cam->ipu, CSI_MEM, ¶ms);
if (err != 0) {
printk(KERN_ERR "ipu_init_channel %d\n", err);
return err;
}
err = ipu_init_channel_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
pixel_fmt, cam->v2f.fmt.pix.width,
cam->v2f.fmt.pix.height,
cam->v2f.fmt.pix.bytesperline,
cam->rotation,
dummy, dummy, 0,
cam->offset.u_offset,
cam->offset.v_offset);
if (err != 0) {
printk(KERN_ERR "CSI_MEM output buffer\n");
return err;
}
err = ipu_enable_channel(cam->ipu, CSI_MEM);
if (err < 0) {
printk(KERN_ERR "ipu_enable_channel CSI_MEM\n");
return err;
}
return err;
}
/*!
 * function to update the physical buffer address for the encoder IDMA channel
 *
 * @param eba         physical buffer address for the encoder IDMA channel
* @param buffer_num int buffer 0 or buffer 1
*
* @return status
*/
static int csi_enc_eba_update(struct ipu_soc *ipu, dma_addr_t eba, int *buffer_num)
{
int err = 0;
pr_debug("eba %x\n", eba);
err = ipu_update_channel_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
*buffer_num, eba);
if (err != 0) {
ipu_clear_buffer_ready(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
*buffer_num);
err = ipu_update_channel_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
*buffer_num, eba);
if (err != 0) {
pr_err("ERROR: v4l2 capture: fail to update "
"buf%d\n", *buffer_num);
return err;
}
}
ipu_select_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER, *buffer_num);
*buffer_num = (*buffer_num == 0) ? 1 : 0;
return 0;
}
/*!
* Enable encoder task
* @param private struct cam_data * mxc capture instance
*
* @return status
*/
static int csi_enc_enabling_tasks(void *private)
{
cam_data *cam = (cam_data *) private;
int err = 0;
CAMERA_TRACE("IPU:In csi_enc_enabling_tasks\n");
if (cam->dummy_frame.vaddress &&
cam->dummy_frame.buffer.length
< PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage)) {
dma_free_coherent(0, cam->dummy_frame.buffer.length,
cam->dummy_frame.vaddress,
cam->dummy_frame.paddress);
cam->dummy_frame.vaddress = 0;
}
if (!cam->dummy_frame.vaddress) {
cam->dummy_frame.vaddress = dma_alloc_coherent(0,
PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
&cam->dummy_frame.paddress,
GFP_DMA | GFP_KERNEL);
if (cam->dummy_frame.vaddress == 0) {
pr_err("ERROR: v4l2 capture: Allocate dummy frame "
"failed.\n");
return -ENOBUFS;
}
cam->dummy_frame.buffer.length =
PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
}
cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
csi_enc_callback, 0, "Mxc Camera", cam);
if (err != 0) {
printk(KERN_ERR "Error registering rot irq\n");
return err;
}
err = csi_enc_setup(cam);
if (err != 0) {
printk(KERN_ERR "csi_enc_setup %d\n", err);
return err;
}
return err;
}
/*!
* Disable encoder task
* @param private struct cam_data * mxc capture instance
*
* @return int
*/
static int csi_enc_disabling_tasks(void *private)
{
cam_data *cam = (cam_data *) private;
int err = 0;
#ifdef CONFIG_MXC_MIPI_CSI2
void *mipi_csi2_info;
int ipu_id;
int csi_id;
#endif
err = ipu_disable_channel(cam->ipu, CSI_MEM, true);
ipu_uninit_channel(cam->ipu, CSI_MEM);
#ifdef CONFIG_MXC_MIPI_CSI2
mipi_csi2_info = mipi_csi2_get_info();
if (mipi_csi2_info) {
if (mipi_csi2_get_status(mipi_csi2_info)) {
ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
if (cam->ipu == ipu_get_soc(ipu_id)
&& cam->csi == csi_id)
mipi_csi2_pixelclk_disable(mipi_csi2_info);
}
} else {
printk(KERN_ERR "Fail to get mipi_csi2_info!\n");
return -EPERM;
}
#endif
return err;
}
/*!
* Enable csi
* @param private struct cam_data * mxc capture instance
*
* @return status
*/
static int csi_enc_enable_csi(void *private)
{
cam_data *cam = (cam_data *) private;
return ipu_enable_csi(cam->ipu, cam->csi);
}
/*!
* Disable csi
* @param private struct cam_data * mxc capture instance
*
* @return status
*/
static int csi_enc_disable_csi(void *private)
{
cam_data *cam = (cam_data *) private;
	/* Free the CSI EOF irq first.
	 * When the CSI is disabled we wait for the IDMAC EOF,
	 * which requests the EOF irq again. */
ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
return ipu_disable_csi(cam->ipu, cam->csi);
}
/*!
* function to select CSI ENC as the working path
*
* @param private struct cam_data * mxc capture instance
*
* @return int
*/
int csi_enc_select(void *private)
{
cam_data *cam = (cam_data *) private;
int err = 0;
if (cam) {
cam->enc_update_eba = csi_enc_eba_update;
cam->enc_enable = csi_enc_enabling_tasks;
cam->enc_disable = csi_enc_disabling_tasks;
cam->enc_enable_csi = csi_enc_enable_csi;
cam->enc_disable_csi = csi_enc_disable_csi;
} else {
err = -EIO;
}
return err;
}
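/*
 * Illustrative sketch (not part of this file): the capture driver that owns
 * the cam_data instance would typically wire up and start this path as
 * follows (error handling omitted, cam is hypothetical):
 *
 *	csi_enc_select(cam);
 *	cam->enc_enable(cam);
 *	cam->enc_enable_csi(cam);
 */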
/*!
* function to de-select CSI ENC as the working path
*
* @param private struct cam_data * mxc capture instance
*
* @return int
*/
int csi_enc_deselect(void *private)
{
cam_data *cam = (cam_data *) private;
int err = 0;
if (cam) {
cam->enc_update_eba = NULL;
cam->enc_enable = NULL;
cam->enc_disable = NULL;
cam->enc_enable_csi = NULL;
cam->enc_disable_csi = NULL;
}
return err;
}
/*!
 * Init the Encoder channels
*
* @return Error code indicating success or failure
*/
__init int csi_enc_init(void)
{
return 0;
}
/*!
 * Deinit the Encoder channels
*
*/
void __exit csi_enc_exit(void)
{
}
module_init(csi_enc_init);
module_exit(csi_enc_exit);
EXPORT_SYMBOL(csi_enc_select);
EXPORT_SYMBOL(csi_enc_deselect);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("CSI ENC Driver");
MODULE_LICENSE("GPL");
| zOrg1331/wandboard-kernel | drivers/media/video/mxc/capture/ipu_csi_enc.c | C | gpl-2.0 | 10,146 |
/*
* linux/mm/filemap.c
*
* Copyright (C) 1994-1999 Linus Torvalds
*/
/*
* This file handles the generic file mmap semantics used by
* most "normal" filesystems (but you don't /have/ to use this:
* the NFS filesystem used to do this differently, for example)
*/
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"
#include "../fs/sreadahead_prof.h"
/*
* FIXME: remove all knowledge of the buffer layer from the core VM
*/
#include <linux/buffer_head.h> /* for try_to_free_buffers */
#include <asm/mman.h>
/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
* though.
*
* Shared mappings now work. 15.8.1995 Bruno.
*
* finished 'unifying' the page and buffer cache and SMP-threaded the
* page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
*
* SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
*/
/*
* Lock ordering:
*
* ->i_mmap_mutex (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
*
* ->i_mutex
* ->i_mmap_mutex (truncate->unmap_mapping_range)
*
* ->mmap_sem
* ->i_mmap_mutex
* ->page_table_lock or pte_lock (various, mainly in memory.c)
* ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
*
* ->mmap_sem
* ->lock_page (access_process_vm)
*
* ->i_mutex (generic_file_buffered_write)
* ->mmap_sem (fault_in_pages_readable->do_page_fault)
*
* bdi->wb.list_lock
* sb_lock (fs/fs-writeback.c)
* ->mapping->tree_lock (__sync_single_inode)
*
* ->i_mmap_mutex
* ->anon_vma.lock (vma_adjust)
*
* ->anon_vma.lock
* ->page_table_lock or pte_lock (anon_vma_prepare and various)
*
* ->page_table_lock or pte_lock
* ->swap_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->tree_lock (try_to_unmap_one)
* ->zone.lru_lock (follow_page->mark_page_accessed)
* ->zone.lru_lock (check_pte_range->isolate_lru_page)
* ->private_lock (page_remove_rmap->set_page_dirty)
* ->tree_lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
* ->inode->i_lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->__set_page_dirty_buffers)
*
* ->i_mmap_mutex
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
/*
* Delete a page from the page cache and free it. Caller has to make
* sure the page is locked and that nobody else uses it - or that usage
* is safe. The caller must hold the mapping's tree_lock.
*/
void __delete_from_page_cache(struct page *page)
{
struct address_space *mapping = page->mapping;
/*
* if we're uptodate, flush out into the cleancache, otherwise
* invalidate any existing cleancache entries. We can't leave
* stale data around in the cleancache once our page is gone
*/
if (PageUptodate(page) && PageMappedToDisk(page))
cleancache_put_page(page);
else
cleancache_invalidate_page(mapping, page);
radix_tree_delete(&mapping->page_tree, page->index);
page->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
if (PageSwapBacked(page))
__dec_zone_page_state(page, NR_SHMEM);
BUG_ON(page_mapped(page));
/*
* Some filesystems seem to re-dirty the page even after
* the VM has canceled the dirty bit (eg ext3 journaling).
*
* Fix it up by doing a final dirty accounting check after
* having removed the page entirely.
*/
if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
}
}
/**
* delete_from_page_cache - delete page from page cache
* @page: the page which the kernel is trying to remove from page cache
*
* This must be called only on pages that have been verified to be in the page
 * cache and locked. It will never put the page into the free list; the
 * caller has a reference on the page.
*/
void delete_from_page_cache(struct page *page)
{
struct address_space *mapping = page->mapping;
void (*freepage)(struct page *);
BUG_ON(!PageLocked(page));
freepage = mapping->a_ops->freepage;
spin_lock_irq(&mapping->tree_lock);
__delete_from_page_cache(page);
spin_unlock_irq(&mapping->tree_lock);
mem_cgroup_uncharge_cache_page(page);
if (freepage)
freepage(page);
page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);
static int sleep_on_page(void *word)
{
io_schedule();
return 0;
}
static int sleep_on_page_killable(void *word)
{
sleep_on_page(word);
return fatal_signal_pending(current) ? -EINTR : 0;
}
/**
* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
* @mapping: address space structure to write
* @start: offset in bytes where the range starts
* @end: offset in bytes where the range ends (inclusive)
* @sync_mode: enable synchronous operation
*
* Start writeback against all of a mapping's dirty pages that lie
* within the byte offsets <start, end> inclusive.
*
* If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
* opposed to a regular memory cleansing writeback. The difference between
* these two operations is that if a dirty page/buffer is encountered, it must
* be waited upon, and not just skipped over.
*/
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
loff_t end, int sync_mode)
{
int ret;
struct writeback_control wbc = {
.sync_mode = sync_mode,
.nr_to_write = LONG_MAX,
.range_start = start,
.range_end = end,
};
if (!mapping_cap_writeback_dirty(mapping))
return 0;
ret = do_writepages(mapping, &wbc);
return ret;
}
static inline int __filemap_fdatawrite(struct address_space *mapping,
int sync_mode)
{
return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}
int filemap_fdatawrite(struct address_space *mapping)
{
return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);
int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
loff_t end)
{
return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
/**
* filemap_flush - mostly a non-blocking flush
* @mapping: target address_space
*
* This is a mostly non-blocking flush. Not suitable for data-integrity
* purposes - I/O may not be started against all dirty pages.
*/
int filemap_flush(struct address_space *mapping)
{
return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
/**
* filemap_fdatawait_range - wait for writeback to complete
* @mapping: address space structure to wait for
* @start_byte: offset in bytes where the range starts
* @end_byte: offset in bytes where the range ends (inclusive)
*
* Walk the list of under-writeback pages of the given address space
* in the given range and wait for all of them.
*/
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
loff_t end_byte)
{
pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
struct pagevec pvec;
int nr_pages;
int ret = 0;
if (end_byte < start_byte)
return 0;
pagevec_init(&pvec, 0);
while ((index <= end) &&
(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
PAGECACHE_TAG_WRITEBACK,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
unsigned i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
/* until radix tree lookup accepts end_index */
if (page->index > end)
continue;
wait_on_page_writeback(page);
if (TestClearPageError(page))
ret = -EIO;
}
pagevec_release(&pvec);
cond_resched();
}
/* Check for outstanding write errors */
if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
ret = -ENOSPC;
if (test_and_clear_bit(AS_EIO, &mapping->flags))
ret = -EIO;
return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);
/**
* filemap_fdatawait - wait for all under-writeback pages to complete
* @mapping: address space structure to wait for
*
* Walk the list of under-writeback pages of the given address space
* and wait for all of them.
*/
int filemap_fdatawait(struct address_space *mapping)
{
loff_t i_size = i_size_read(mapping->host);
if (i_size == 0)
return 0;
return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);
int filemap_write_and_wait(struct address_space *mapping)
{
int err = 0;
if (mapping->nrpages) {
err = filemap_fdatawrite(mapping);
/*
* Even if the above returned error, the pages may be
* written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case; it may indicate that the worst
* thing (e.g. bug) happened, so we avoid waiting for it.
*/
if (err != -EIO) {
int err2 = filemap_fdatawait(mapping);
if (!err)
err = err2;
}
}
return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
/**
* filemap_write_and_wait_range - write out & wait on a file range
* @mapping: the address_space for the pages
* @lstart: offset in bytes where the range starts
* @lend: offset in bytes where the range ends (inclusive)
*
* Write out and wait upon file offsets lstart->lend, inclusive.
*
* Note that `lend' is inclusive (describes the last byte to be written) so
* that this function can be used to write to the very end-of-file (end = -1).
*/
int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend)
{
int err = 0;
if (mapping->nrpages) {
err = __filemap_fdatawrite_range(mapping, lstart, lend,
WB_SYNC_ALL);
/* See comment of filemap_write_and_wait() */
if (err != -EIO) {
int err2 = filemap_fdatawait_range(mapping,
lstart, lend);
if (!err)
err = err2;
}
}
return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
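/*
 * Illustrative sketch (not part of this file): a filesystem's ->fsync()
 * typically writes out and waits on just the affected byte range before
 * syncing its own metadata, roughly:
 *
 *	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
 *	if (err)
 *		return err;
 */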
/**
* replace_page_cache_page - replace a pagecache page with a new one
* @old: page to be replaced
* @new: page to replace with
* @gfp_mask: allocation mode
*
* This function replaces a page in the pagecache with a new one. On
* success it acquires the pagecache reference for the new page and
* drops it for the old page. Both the old and new pages must be
* locked. This function does not add the new page to the LRU, the
* caller must do that.
*
* The remove + add is atomic. The only way this function can fail is
* memory allocation failure.
*/
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
int error;
VM_BUG_ON(!PageLocked(old));
VM_BUG_ON(!PageLocked(new));
VM_BUG_ON(new->mapping);
error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
if (!error) {
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *);
pgoff_t offset = old->index;
freepage = mapping->a_ops->freepage;
page_cache_get(new);
new->mapping = mapping;
new->index = offset;
spin_lock_irq(&mapping->tree_lock);
__delete_from_page_cache(old);
error = radix_tree_insert(&mapping->page_tree, offset, new);
BUG_ON(error);
mapping->nrpages++;
__inc_zone_page_state(new, NR_FILE_PAGES);
if (PageSwapBacked(new))
__inc_zone_page_state(new, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
/* mem_cgroup codes must not be called under tree_lock */
mem_cgroup_replace_page_cache(old, new);
radix_tree_preload_end();
if (freepage)
freepage(old);
page_cache_release(old);
}
return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
/**
* add_to_page_cache_locked - add a locked page to the pagecache
* @page: page to add
* @mapping: the page's address_space
* @offset: page index
* @gfp_mask: page allocation mode
*
* This function is used to add a page to the pagecache. It must be locked.
* This function does not add the page to the LRU. The caller must do that.
*/
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
int error;
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageSwapBacked(page));
error = mem_cgroup_cache_charge(page, current->mm,
gfp_mask & GFP_RECLAIM_MASK);
if (error)
goto out;
error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
if (error == 0) {
page_cache_get(page);
page->mapping = mapping;
page->index = offset;
spin_lock_irq(&mapping->tree_lock);
error = radix_tree_insert(&mapping->page_tree, offset, page);
if (likely(!error)) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
spin_unlock_irq(&mapping->tree_lock);
} else {
page->mapping = NULL;
/* Leave page->index set: truncation relies upon it */
spin_unlock_irq(&mapping->tree_lock);
mem_cgroup_uncharge_cache_page(page);
page_cache_release(page);
}
radix_tree_preload_end();
} else
mem_cgroup_uncharge_cache_page(page);
out:
return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
int ret;
ret = add_to_page_cache(page, mapping, offset, gfp_mask);
if (ret == 0)
lru_cache_add_file(page);
return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
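/*
 * Illustrative sketch (not part of this file): a read path that misses in
 * the page cache typically allocates a page and inserts it like this
 * before issuing ->readpage() (index is hypothetical):
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (page) {
 *		err = add_to_page_cache_lru(page, mapping, index,
 *					    GFP_KERNEL);
 *		if (err)
 *			page_cache_release(page);
 *	}
 */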
#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
int n;
struct page *page;
if (cpuset_do_page_mem_spread()) {
unsigned int cpuset_mems_cookie;
do {
cpuset_mems_cookie = get_mems_allowed();
n = cpuset_mem_spread_node();
page = alloc_pages_exact_node(n, gfp, 0);
} while (!put_mems_allowed(cpuset_mems_cookie) && !page);
return page;
}
return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
/*
* In order to wait for pages to become available there must be
* waitqueues associated with pages. By using a hash table of
* waitqueues where the bucket discipline is to maintain all
* waiters on the same queue and wake all when any of the pages
* become available, and for the woken contexts to check to be
* sure the appropriate page became available, this saves space
* at a cost of "thundering herd" phenomena during rare hash
* collisions.
*/
static wait_queue_head_t *page_waitqueue(struct page *page)
{
const struct zone *zone = page_zone(page);
return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
static inline void wake_up_page(struct page *page, int bit)
{
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
void wait_on_page_bit(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
if (test_bit(bit_nr, &page->flags))
__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);
int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
if (!test_bit(bit_nr, &page->flags))
return 0;
return __wait_on_bit(page_waitqueue(page), &wait,
sleep_on_page_killable, TASK_KILLABLE);
}
/**
* add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
* @page: Page defining the wait queue of interest
* @waiter: Waiter to add to the queue
*
* Add an arbitrary @waiter to the wait queue for the nominated @page.
*/
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
wait_queue_head_t *q = page_waitqueue(page);
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue(q, waiter);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);
/**
* unlock_page - unlock a locked page
* @page: the page
*
 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
*
* The mb is necessary to enforce ordering between the clear_bit and the read
* of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
*/
void unlock_page(struct page *page)
{
VM_BUG_ON(!PageLocked(page));
clear_bit_unlock(PG_locked, &page->flags);
smp_mb__after_clear_bit();
wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
/**
* end_page_writeback - end writeback against a page
* @page: the page
*/
void end_page_writeback(struct page *page)
{
if (TestClearPageReclaim(page))
rotate_reclaimable_page(page);
if (!test_clear_page_writeback(page))
BUG();
smp_mb__after_clear_bit();
wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
/**
* __lock_page - get a lock on the page, assuming we need to sleep to get it
* @page: the page to lock
*/
void __lock_page(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);
int __lock_page_killable(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
return __wait_on_bit_lock(page_waitqueue(page), &wait,
sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
if (flags & FAULT_FLAG_ALLOW_RETRY) {
/*
* CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
*/
if (flags & FAULT_FLAG_RETRY_NOWAIT)
return 0;
up_read(&mm->mmap_sem);
if (flags & FAULT_FLAG_KILLABLE)
wait_on_page_locked_killable(page);
else
wait_on_page_locked(page);
return 0;
} else {
if (flags & FAULT_FLAG_KILLABLE) {
int ret;
ret = __lock_page_killable(page);
if (ret) {
up_read(&mm->mmap_sem);
return 0;
}
} else
__lock_page(page);
return 1;
}
}
/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
* @offset: the page index
*
* Is there a pagecache struct page at the given (mapping, offset) tuple?
* If yes, increment its refcount and return it; if no, return NULL.
*/
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
void **pagep;
struct page *page;
rcu_read_lock();
repeat:
page = NULL;
pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
if (pagep) {
page = radix_tree_deref_slot(pagep);
if (unlikely(!page))
goto out;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page))
goto repeat;
/*
* Otherwise, shmem/tmpfs must be storing a swap entry
* here as an exceptional entry: so return it without
* attempting to raise page count.
*/
goto out;
}
if (!page_cache_get_speculative(page))
goto repeat;
/*
* Has the page moved?
* This is part of the lockless pagecache protocol. See
* include/linux/pagemap.h for details.
*/
if (unlikely(page != *pagep)) {
page_cache_release(page);
goto repeat;
}
}
out:
rcu_read_unlock();
return page;
}
EXPORT_SYMBOL(find_get_page);
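/*
 * Illustrative sketch, kept out of the build with #if 0: how a caller
 * typically consumes find_get_page().  It only returns a reference, so the
 * page may still be locked, under writeback or not uptodate, and the
 * reference must be dropped when done.  The helper name is hypothetical.
 */
#if 0
static bool example_page_is_cached_uptodate(struct address_space *mapping,
					    pgoff_t offset)
{
	struct page *page = find_get_page(mapping, offset);
	bool uptodate = false;

	/* shmem/tmpfs can hand back an exceptional swap entry, not a page */
	if (page && !radix_tree_exception(page)) {
		uptodate = PageUptodate(page);
		page_cache_release(page);
	}
	return uptodate;
}
#endif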
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
* @offset: the page index
*
* Locates the desired pagecache page, locks it, increments its reference
* count and returns its address.
*
 * Returns %NULL if the page was not present. find_lock_page() may sleep.
*/
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
struct page *page;
repeat:
page = find_get_page(mapping, offset);
if (page && !radix_tree_exception(page)) {
lock_page(page);
/* Has the page been truncated? */
if (unlikely(page->mapping != mapping)) {
unlock_page(page);
page_cache_release(page);
goto repeat;
}
VM_BUG_ON(page->index != offset);
}
return page;
}
EXPORT_SYMBOL(find_lock_page);
/**
* find_or_create_page - locate or add a pagecache page
* @mapping: the page's address_space
* @index: the page's index into the mapping
* @gfp_mask: page allocation mode
*
* Locates a page in the pagecache. If the page is not present, a new page
* is allocated using @gfp_mask and is added to the pagecache and to the VM's
* LRU list. The returned page is locked and has its reference count
* incremented.
*
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or %NULL on
 * memory exhaustion.
*/
struct page *find_or_create_page(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask)
{
struct page *page;
int err;
repeat:
page = find_lock_page(mapping, index);
if (!page) {
page = __page_cache_alloc(gfp_mask);
if (!page)
return NULL;
/*
* We want a regular kernel memory (not highmem or DMA etc)
* allocation for the radix tree nodes, but we need to honour
* the context-specific requirements the caller has asked for.
* GFP_RECLAIM_MASK collects those requirements.
*/
err = add_to_page_cache_lru(page, mapping, index,
(gfp_mask & GFP_RECLAIM_MASK));
if (unlikely(err)) {
page_cache_release(page);
page = NULL;
if (err == -EEXIST)
goto repeat;
}
}
return page;
}
EXPORT_SYMBOL(find_or_create_page);
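/*
 * Illustrative sketch, kept out of the build with #if 0: find_or_create_page()
 * hands back a locked, referenced page, so the caller owns both the
 * unlock_page() and the page_cache_release().  The function name is
 * hypothetical.
 */
#if 0
static int example_zero_cached_page(struct address_space *mapping,
				    pgoff_t index)
{
	struct page *page = find_or_create_page(mapping, index, GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	/* a real user that wants the zeroes written back would also
	 * set_page_dirty(page) here */

	unlock_page(page);
	page_cache_release(page);
	return 0;
}
#endif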
/**
* find_get_pages - gang pagecache lookup
* @mapping: The address_space to search
* @start: The starting page index
* @nr_pages: The maximum number of pages
* @pages: Where the resulting pages are placed
*
* find_get_pages() will search for and return a group of up to
* @nr_pages pages in the mapping. The pages are placed at @pages.
* find_get_pages() takes a reference against the returned pages.
*
* The search returns a group of mapping-contiguous pages with ascending
* indexes. There may be holes in the indices due to not-present pages.
*
* find_get_pages() returns the number of pages which were found.
*/
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages)
{
struct radix_tree_iter iter;
void **slot;
unsigned ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
restart:
radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
struct page *page;
repeat:
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
continue;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
/*
* Transient condition which can only trigger
* when entry at index 0 moves out of or back
* to root: none yet gotten, safe to restart.
*/
WARN_ON(iter.index);
goto restart;
}
/*
* Otherwise, shmem/tmpfs must be storing a swap entry
* here as an exceptional entry: so skip over it -
* we only reach this from invalidate_mapping_pages().
*/
continue;
}
if (!page_cache_get_speculative(page))
goto repeat;
/* Has the page moved? */
if (unlikely(page != *slot)) {
page_cache_release(page);
goto repeat;
}
pages[ret] = page;
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
return ret;
}
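/*
 * Illustrative sketch, kept out of the build with #if 0: a batched walk over
 * a mapping built on find_get_pages().  Callers normally go through
 * pagevec_lookup(), which wraps this function; the helper name and the
 * per-page "process" step are hypothetical.
 */
#if 0
static void example_walk_mapping(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned i, nr;

	while ((nr = find_get_pages(mapping, index, ARRAY_SIZE(pages),
				    pages)) != 0) {
		/* indexes are ascending; continue after the last one found */
		index = pages[nr - 1]->index + 1;
		for (i = 0; i < nr; i++) {
			/* ... process pages[i] ... */
			page_cache_release(pages[i]);
		}
	}
}
#endif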
/**
* find_get_pages_contig - gang contiguous pagecache lookup
* @mapping: The address_space to search
* @index: The starting page index
* @nr_pages: The maximum number of pages
* @pages: Where the resulting pages are placed
*
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned pages are guaranteed to be contiguous.
*
* find_get_pages_contig() returns the number of pages which were found.
*/
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
unsigned int nr_pages, struct page **pages)
{
struct radix_tree_iter iter;
void **slot;
unsigned int ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
restart:
radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
struct page *page;
repeat:
page = radix_tree_deref_slot(slot);
		/* A hole: there is no reason to continue */
if (unlikely(!page))
break;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
/*
* Transient condition which can only trigger
* when entry at index 0 moves out of or back
* to root: none yet gotten, safe to restart.
*/
goto restart;
}
/*
* Otherwise, shmem/tmpfs must be storing a swap entry
* here as an exceptional entry: so stop looking for
* contiguous pages.
*/
break;
}
if (!page_cache_get_speculative(page))
goto repeat;
/* Has the page moved? */
if (unlikely(page != *slot)) {
page_cache_release(page);
goto repeat;
}
		/*
		 * We must check mapping and index after taking the ref.
		 * Otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
if (page->mapping == NULL || page->index != iter.index) {
page_cache_release(page);
break;
}
pages[ret] = page;
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);
/**
* find_get_pages_tag - find and return pages that match @tag
* @mapping: the address_space to search
* @index: the starting page index
* @tag: the tag index
* @nr_pages: the maximum number of pages
* @pages: where the resulting pages are placed
*
* Like find_get_pages, except we only return pages which are tagged with
* @tag. We update @index to index the next page for the traversal.
*/
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages)
{
struct radix_tree_iter iter;
void **slot;
unsigned ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
restart:
radix_tree_for_each_tagged(slot, &mapping->page_tree,
&iter, *index, tag) {
struct page *page;
repeat:
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
continue;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
/*
* Transient condition which can only trigger
* when entry at index 0 moves out of or back
* to root: none yet gotten, safe to restart.
*/
goto restart;
}
/*
* This function is never used on a shmem/tmpfs
* mapping, so a swap entry won't be found here.
*/
BUG();
}
if (!page_cache_get_speculative(page))
goto repeat;
/* Has the page moved? */
if (unlikely(page != *slot)) {
page_cache_release(page);
goto repeat;
}
pages[ret] = page;
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
if (ret)
*index = pages[ret - 1]->index + 1;
return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
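/*
 * Illustrative sketch, kept out of the build with #if 0: scanning a mapping
 * for pages carrying a radix-tree tag, the way writeback walks
 * PAGECACHE_TAG_DIRTY.  Note that find_get_pages_tag() advances *index
 * itself.  The helper name is hypothetical.
 */
#if 0
static void example_scan_dirty(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned i, nr;

	while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
					ARRAY_SIZE(pages), pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ... queue pages[i] for writeback ... */
			page_cache_release(pages[i]);
		}
	}
}
#endif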
/**
* grab_cache_page_nowait - returns locked page at given index in given cache
* @mapping: target address_space
* @index: the page index
*
* Same as grab_cache_page(), but do not wait if the page is unavailable.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
*
* Clear __GFP_FS when allocating the page to avoid recursion into the fs
* and deadlock against the caller's locked page.
*/
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
struct page *page = find_get_page(mapping, index);
if (page) {
if (trylock_page(page))
return page;
page_cache_release(page);
return NULL;
}
page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
page_cache_release(page);
page = NULL;
}
return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);
/*
* CD/DVDs are error prone. When a medium error occurs, the driver may fail
* a _large_ part of the i/o request. Imagine the worst scenario:
*
* ---R__________________________________________B__________
* ^ reading here ^ bad block(assume 4k)
*
* read(R) => miss => readahead(R...B) => media error => frustrating retries
* => failing the whole request => read(R) => read(R+1) =>
* readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
* readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
* readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
*
* It is going insane. Fix it by quickly scaling down the readahead size.
*/
static void shrink_readahead_size_eio(struct file *filp,
struct file_ra_state *ra)
{
ra->ra_pages /= 4;
}
/**
* do_generic_file_read - generic file read routine
* @filp: the file to read
* @ppos: current file position
* @desc: read_descriptor
* @actor: read method
*
* This is a generic file read routine, and uses the
* mapping->a_ops->readpage() function for the actual low-level stuff.
*
* This is really ugly. But the goto's actually try to clarify some
* of the logic when it comes to error handling etc.
*/
static void do_generic_file_read(struct file *filp, loff_t *ppos,
read_descriptor_t *desc, read_actor_t actor)
{
struct address_space *mapping = filp->f_mapping;
struct inode *inode = mapping->host;
struct file_ra_state *ra = &filp->f_ra;
pgoff_t index;
pgoff_t last_index;
pgoff_t prev_index;
unsigned long offset; /* offset into pagecache page */
unsigned int prev_offset;
int error;
index = *ppos >> PAGE_CACHE_SHIFT;
prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
for (;;) {
struct page *page;
pgoff_t end_index;
loff_t isize;
unsigned long nr, ret;
cond_resched();
find_page:
page = find_get_page(mapping, index);
if (!page) {
page_cache_sync_readahead(mapping,
ra, filp,
index, last_index - index);
page = find_get_page(mapping, index);
if (unlikely(page == NULL))
goto no_cached_page;
}
if (PageReadahead(page)) {
page_cache_async_readahead(mapping,
ra, filp, page,
index, last_index - index);
}
if (!PageUptodate(page)) {
if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
!mapping->a_ops->is_partially_uptodate)
goto page_not_up_to_date;
if (!trylock_page(page))
goto page_not_up_to_date;
/* Did it get truncated before we got the lock? */
if (!page->mapping)
goto page_not_up_to_date_locked;
if (!mapping->a_ops->is_partially_uptodate(page,
desc, offset))
goto page_not_up_to_date_locked;
unlock_page(page);
}
page_ok:
/*
* i_size must be checked after we know the page is Uptodate.
*
		 * Checking it only after that allows us to calculate
* the correct value for "nr", which means the zero-filled
* part of the page is not copied back to userspace (unless
* another truncate extends the file - this is desired though).
*/
isize = i_size_read(inode);
end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
if (unlikely(!isize || index > end_index)) {
page_cache_release(page);
goto out;
}
/* nr is the maximum number of bytes to copy from this page */
nr = PAGE_CACHE_SIZE;
if (index == end_index) {
nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
if (nr <= offset) {
page_cache_release(page);
goto out;
}
}
nr = nr - offset;
/* If users can be writing to this page using arbitrary
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
/*
* When a sequential read accesses a page several times,
* only mark it as accessed the first time.
*/
if (prev_index != index || offset != prev_offset)
mark_page_accessed(page);
prev_index = index;
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
*
* The actor routine returns how many bytes were actually used..
* NOTE! This may not be the same as how much of a user buffer
* we filled up (we may be padding etc), so we can only update
* "pos" here (the actor routine has to update the user buffer
* pointers and the remaining count).
*/
ret = actor(desc, page, offset, nr);
offset += ret;
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
prev_offset = offset;
page_cache_release(page);
if (ret == nr && desc->count)
continue;
goto out;
page_not_up_to_date:
/* Get exclusive access to the page ... */
error = lock_page_killable(page);
if (unlikely(error))
goto readpage_error;
page_not_up_to_date_locked:
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
unlock_page(page);
page_cache_release(page);
continue;
}
/* Did somebody else fill it already? */
if (PageUptodate(page)) {
unlock_page(page);
goto page_ok;
}
readpage:
/*
* A previous I/O error may have been due to temporary
* failures, eg. multipath errors.
* PG_error will be set again if readpage fails.
*/
ClearPageError(page);
/* Start the actual read. The read will unlock the page. */
error = mapping->a_ops->readpage(filp, page);
if (unlikely(error)) {
if (error == AOP_TRUNCATED_PAGE) {
page_cache_release(page);
goto find_page;
}
goto readpage_error;
}
if (!PageUptodate(page)) {
error = lock_page_killable(page);
if (unlikely(error))
goto readpage_error;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
* invalidate_mapping_pages got it
*/
unlock_page(page);
page_cache_release(page);
goto find_page;
}
unlock_page(page);
shrink_readahead_size_eio(filp, ra);
error = -EIO;
goto readpage_error;
}
unlock_page(page);
}
goto page_ok;
readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
page_cache_release(page);
goto out;
no_cached_page:
/*
* Ok, it wasn't cached, so we need to create a new
* page..
*/
page = page_cache_alloc_cold(mapping);
if (!page) {
desc->error = -ENOMEM;
goto out;
}
error = add_to_page_cache_lru(page, mapping,
index, GFP_KERNEL);
if (error) {
page_cache_release(page);
if (error == -EEXIST)
goto find_page;
desc->error = error;
goto out;
}
goto readpage;
}
out:
ra->prev_pos = prev_index;
ra->prev_pos <<= PAGE_CACHE_SHIFT;
ra->prev_pos |= prev_offset;
*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
file_accessed(filp);
}
/*
 * Performs necessary checks before doing a read or write
* @iov: io vector request
* @nr_segs: number of segments in the iovec
* @count: number of bytes to write
* @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
*
 * Adjusts the number of segments and the number of bytes to access (nr_segs
 * should be properly initialized first). Returns the appropriate error code
 * that the caller should return, or zero if the access should be allowed.
*/
int generic_segment_checks(const struct iovec *iov,
unsigned long *nr_segs, size_t *count, int access_flags)
{
unsigned long seg;
size_t cnt = 0;
for (seg = 0; seg < *nr_segs; seg++) {
const struct iovec *iv = &iov[seg];
/*
* If any segment has a negative length, or the cumulative
* length ever wraps negative then return -EINVAL.
*/
cnt += iv->iov_len;
if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
return -EINVAL;
if (access_ok(access_flags, iv->iov_base, iv->iov_len))
continue;
if (seg == 0)
return -EFAULT;
*nr_segs = seg;
cnt -= iv->iov_len; /* This segment is no good */
break;
}
*count = cnt;
return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
unsigned long offset, unsigned long size)
{
struct iov_iter *iter = desc->arg.data;
unsigned long copied = 0;
if (size > desc->count)
size = desc->count;
copied = iov_iter_copy_to_user(page, iter, offset, size);
if (copied < size)
desc->error = -EFAULT;
iov_iter_advance(iter, copied);
desc->count -= copied;
desc->written += copied;
return copied;
}
/**
* generic_file_read_iter - generic filesystem read routine
* @iocb: kernel I/O control block
 * @iter: iov_iter describing the destination memory
* @pos: current file position
*/
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
struct file *filp = iocb->ki_filp;
read_descriptor_t desc;
ssize_t retval = 0;
size_t count = iov_iter_count(iter);
loff_t *ppos = &iocb->ki_pos;
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (filp->f_flags & O_DIRECT) {
loff_t size;
struct address_space *mapping;
struct inode *inode;
mapping = filp->f_mapping;
inode = mapping->host;
if (!count)
goto out; /* skip atime */
size = i_size_read(inode);
if (pos < size) {
retval = filemap_write_and_wait_range(mapping, pos,
pos + count - 1);
if (!retval) {
struct blk_plug plug;
blk_start_plug(&plug);
retval = mapping->a_ops->direct_IO(READ, iocb,
iter, pos);
blk_finish_plug(&plug);
}
if (retval > 0) {
*ppos = pos + retval;
count -= retval;
}
/*
* Btrfs can have a short DIO read if we encounter
* compressed extents, so if there was an error, or if
* we've already read everything we wanted to, or if
* there was a short read because we hit EOF, go ahead
* and return. Otherwise fallthrough to buffered io for
* the rest of the read.
*/
if (retval < 0 || !count || *ppos >= size) {
file_accessed(filp);
goto out;
}
}
}
desc.written = 0;
desc.arg.data = iter;
desc.count = count;
desc.error = 0;
do_generic_file_read(filp, ppos, &desc, file_read_iter_actor);
if (desc.written)
retval = desc.written;
else
retval = desc.error;
out:
return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);
/**
* generic_file_aio_read - generic filesystem read routine
* @iocb: kernel I/O control block
* @iov: io vector request
* @nr_segs: number of segments in the iovec
* @pos: current file position
*
* This is the "read()" routine for all filesystems
* that can use the page cache directly.
*/
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct iov_iter iter;
int ret;
size_t count;
count = 0;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (ret)
return ret;
iov_iter_init(&iter, iov, nr_segs, count, 0);
return generic_file_read_iter(iocb, &iter, pos);
}
EXPORT_SYMBOL(generic_file_aio_read);
#ifdef CONFIG_MMU
/**
* page_cache_read - adds requested page to the page cache if not already there
* @file: file to read
* @offset: page index
*
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
static int page_cache_read(struct file *file, pgoff_t offset)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
int ret;
do {
page = page_cache_alloc_cold(mapping);
if (!page)
return -ENOMEM;
ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
if (ret == 0)
ret = mapping->a_ops->readpage(file, page);
else if (ret == -EEXIST)
ret = 0; /* losing race to add is OK */
page_cache_release(page);
} while (ret == AOP_TRUNCATED_PAGE);
return ret;
}
#define MMAP_LOTSAMISS (100)
/*
* Synchronous readahead happens when we don't even find
* a page in the page cache at all.
*/
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
struct file_ra_state *ra,
struct file *file,
pgoff_t offset)
{
unsigned long ra_pages;
struct address_space *mapping = file->f_mapping;
/* If we don't want any read-ahead, don't bother */
if (VM_RandomReadHint(vma))
return;
if (!ra->ra_pages)
return;
if (VM_SequentialReadHint(vma)) {
page_cache_sync_readahead(mapping, ra, file, offset,
ra->ra_pages);
return;
}
/* Avoid banging the cache line if not needed */
if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
ra->mmap_miss++;
/*
* Do we miss much more than hit in this file? If so,
* stop bothering with read-ahead. It will only hurt.
*/
if (ra->mmap_miss > MMAP_LOTSAMISS)
return;
/*
* mmap read-around
*/
ra_pages = max_sane_readahead(ra->ra_pages);
ra->start = max_t(long, 0, offset - ra_pages / 2);
ra->size = ra_pages;
ra->async_size = ra_pages / 4;
ra_submit(ra, mapping, file);
}
/*
 * Asynchronous readahead happens when we find the page with PG_readahead set,
 * so we may want to extend the readahead further.
*/
static void do_async_mmap_readahead(struct vm_area_struct *vma,
struct file_ra_state *ra,
struct file *file,
struct page *page,
pgoff_t offset)
{
struct address_space *mapping = file->f_mapping;
/* If we don't want any read-ahead, don't bother */
if (VM_RandomReadHint(vma))
return;
if (ra->mmap_miss > 0)
ra->mmap_miss--;
if (PageReadahead(page))
page_cache_async_readahead(mapping, ra, file,
page, offset, ra->ra_pages);
}
/**
* filemap_fault - read in file data for page fault handling
* @vma: vma in which the fault was taken
* @vmf: struct vm_fault containing details of the fault
*
* filemap_fault() is invoked via the vma operations vector for a
* mapped memory region to read in file data during a page fault.
*
* The goto's are kind of ugly, but this streamlines the normal case of having
* it in the page cache, and handles the special cases reasonably without
* having a lot of duplicated code.
*/
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int error;
struct file *file = vma->vm_file;
struct address_space *mapping = file->f_mapping;
struct file_ra_state *ra = &file->f_ra;
struct inode *inode = mapping->host;
pgoff_t offset = vmf->pgoff;
struct page *page;
pgoff_t size;
int ret = 0;
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (offset >= size)
return VM_FAULT_SIGBUS;
/*
* Do we have something in the page cache already?
*/
page = find_get_page(mapping, offset);
if (likely(page)) {
/*
* We found the page, so try async readahead before
* waiting for the lock.
*/
do_async_mmap_readahead(vma, ra, file, page, offset);
} else {
/* No page in the page cache at all */
do_sync_mmap_readahead(vma, ra, file, offset);
count_vm_event(PGMAJFAULT);
		sreadahead_prof(file, 0, 0);
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
ret = VM_FAULT_MAJOR;
retry_find:
page = find_get_page(mapping, offset);
if (!page)
goto no_cached_page;
}
if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
page_cache_release(page);
return ret | VM_FAULT_RETRY;
}
/* Did it get truncated? */
if (unlikely(page->mapping != mapping)) {
unlock_page(page);
put_page(page);
goto retry_find;
}
VM_BUG_ON(page->index != offset);
/*
* We have a locked page in the page cache, now we need to check
* that it's up-to-date. If not, it is going to be due to an error.
*/
if (unlikely(!PageUptodate(page)))
goto page_not_uptodate;
/*
* Found the page and have a reference on it.
* We must recheck i_size under page lock.
*/
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (unlikely(offset >= size)) {
unlock_page(page);
page_cache_release(page);
return VM_FAULT_SIGBUS;
}
vmf->page = page;
return ret | VM_FAULT_LOCKED;
no_cached_page:
/*
* We're only likely to ever get here if MADV_RANDOM is in
* effect.
*/
error = page_cache_read(file, offset);
/*
* The page we want has now been added to the page cache.
* In the unlikely event that someone removed it in the
* meantime, we'll just come back here and read it again.
*/
if (error >= 0)
goto retry_find;
/*
* An error return from page_cache_read can result if the
* system is low on memory, or a problem occurs while trying
* to schedule I/O.
*/
if (error == -ENOMEM)
return VM_FAULT_OOM;
return VM_FAULT_SIGBUS;
page_not_uptodate:
/*
* Umm, take care of errors if the page isn't up-to-date.
* Try to re-read it _once_. We do this synchronously,
* because there really aren't any performance issues here
* and we need to check for errors.
*/
ClearPageError(page);
error = mapping->a_ops->readpage(file, page);
if (!error) {
wait_on_page_locked(page);
if (!PageUptodate(page))
error = -EIO;
}
page_cache_release(page);
if (!error || error == AOP_TRUNCATED_PAGE)
goto retry_find;
	/* Things didn't work out. Tell the mm layer by returning VM_FAULT_SIGBUS. */
shrink_readahead_size_eio(file, ra);
return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);
int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
int ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
lock_page(page);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out;
}
/*
* We mark the page dirty already here so that when freeze is in
* progress, we are guaranteed that writeback during freezing will
* see the dirty page and writeprotect it again.
*/
set_page_dirty(page);
out:
sb_end_pagefault(inode->i_sb);
return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);
const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = filemap_page_mkwrite,
};
/* This is used for a general mmap of a disk file */
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
struct address_space *mapping = file->f_mapping;
if (!mapping->a_ops->readpage)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &generic_file_vm_ops;
vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
/*
* This is for filesystems which do not implement ->writepage.
*/
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
return -EINVAL;
return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
return -ENOSYS;
}
#endif /* CONFIG_MMU */
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
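/*
 * Illustrative sketch, kept out of the build with #if 0: how a simple
 * filesystem wires the generic helpers exported above into its
 * file_operations (compare ext2 in this kernel generation).  The structure
 * name is hypothetical.
 */
#if 0
static const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
	.splice_read	= generic_file_splice_read,
};
#endif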
static struct page *__read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data,
gfp_t gfp)
{
struct page *page;
int err;
repeat:
page = find_get_page(mapping, index);
if (!page) {
page = __page_cache_alloc(gfp | __GFP_COLD);
if (!page)
return ERR_PTR(-ENOMEM);
err = add_to_page_cache_lru(page, mapping, index, gfp);
if (unlikely(err)) {
page_cache_release(page);
if (err == -EEXIST)
goto repeat;
/* Presumably ENOMEM for radix tree node */
return ERR_PTR(err);
}
err = filler(data, page);
if (err < 0) {
page_cache_release(page);
page = ERR_PTR(err);
}
}
return page;
}
static struct page *do_read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data,
gfp_t gfp)
{
struct page *page;
int err;
retry:
page = __read_cache_page(mapping, index, filler, data, gfp);
if (IS_ERR(page))
return page;
if (PageUptodate(page))
goto out;
lock_page(page);
if (!page->mapping) {
unlock_page(page);
page_cache_release(page);
goto retry;
}
if (PageUptodate(page)) {
unlock_page(page);
goto out;
}
err = filler(data, page);
if (err < 0) {
page_cache_release(page);
return ERR_PTR(err);
}
out:
mark_page_accessed(page);
return page;
}
/**
* read_cache_page_async - read into page cache, fill it if needed
* @mapping: the page's address_space
* @index: the page index
* @filler: function to perform the read
* @data: first arg to filler(data, page) function, often left as NULL
*
* Same as read_cache_page, but don't wait for page to become unlocked
* after submitting it to the filler.
*
* Read into the page cache. If a page already exists, and PageUptodate() is
* not set, try to fill the page but don't wait for it to become unlocked.
*
* If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page_async(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data)
{
return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page_async);
static struct page *wait_on_page_read(struct page *page)
{
if (!IS_ERR(page)) {
wait_on_page_locked(page);
if (!PageUptodate(page)) {
page_cache_release(page);
page = ERR_PTR(-EIO);
}
}
return page;
}
/**
* read_cache_page_gfp - read into page cache, using specified page allocation flags.
* @mapping: the page's address_space
* @index: the page index
* @gfp: the page allocator flags to use if allocating
*
* This is the same as "read_mapping_page(mapping, index, NULL)", but with
* any new page allocations done using the specified allocation flags.
*
* If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page_gfp(struct address_space *mapping,
pgoff_t index,
gfp_t gfp)
{
filler_t *filler = (filler_t *)mapping->a_ops->readpage;
return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
}
EXPORT_SYMBOL(read_cache_page_gfp);
/**
* read_cache_page - read into page cache, fill it if needed
* @mapping: the page's address_space
* @index: the page index
* @filler: function to perform the read
* @data: first arg to filler(data, page) function, often left as NULL
*
* Read into the page cache. If a page already exists, and PageUptodate() is
* not set, try to fill the page then wait for it to become unlocked.
*
* If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data)
{
return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
}
EXPORT_SYMBOL(read_cache_page);
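/*
 * Illustrative sketch, kept out of the build with #if 0: pulling one page of
 * a file in through the page cache.  read_mapping_page() is the pagemap.h
 * wrapper that feeds mapping->a_ops->readpage to read_cache_page(); the
 * helper name below is hypothetical.
 */
#if 0
static struct page *example_get_uptodate_page(struct address_space *mapping,
					      pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return page;	/* typically ERR_PTR(-EIO) or ERR_PTR(-ENOMEM) */

	/* The caller must page_cache_release(page) when it is done. */
	return page;
}
#endif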
/*
* The logic we want is
*
* if suid or (sgid and xgrp)
* remove privs
*/
int should_remove_suid(struct dentry *dentry)
{
umode_t mode = dentry->d_inode->i_mode;
int kill = 0;
/* suid always must be killed */
if (unlikely(mode & S_ISUID))
kill = ATTR_KILL_SUID;
/*
* sgid without any exec bits is just a mandatory locking mark; leave
* it alone. If some exec bits are set, it's a real sgid; kill it.
*/
if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
kill |= ATTR_KILL_SGID;
if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
return kill;
return 0;
}
EXPORT_SYMBOL(should_remove_suid);
static int __remove_suid(struct dentry *dentry, int kill)
{
struct iattr newattrs;
newattrs.ia_valid = ATTR_FORCE | kill;
return notify_change(dentry, &newattrs);
}
int file_remove_suid(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
int killsuid;
int killpriv;
int error = 0;
/* Fast path for nothing security related */
if (IS_NOSEC(inode))
return 0;
killsuid = should_remove_suid(dentry);
killpriv = security_inode_need_killpriv(dentry);
if (killpriv < 0)
return killpriv;
if (killpriv)
error = security_inode_killpriv(dentry);
if (!error && killsuid)
error = __remove_suid(dentry, killsuid);
if (!error && (inode->i_sb->s_flags & MS_NOSEC))
inode->i_flags |= S_NOSEC;
return error;
}
EXPORT_SYMBOL(file_remove_suid);
/*
* Performs necessary checks before doing a write
*
 * It can adjust the writing position or the number of bytes to write.
 * Returns the appropriate error code that the caller should return,
 * or zero if the write should be allowed.
*/
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
struct inode *inode = file->f_mapping->host;
unsigned long limit = rlimit(RLIMIT_FSIZE);
if (unlikely(*pos < 0))
return -EINVAL;
if (!isblk) {
/* FIXME: this is for backwards compatibility with 2.4 */
if (file->f_flags & O_APPEND)
*pos = i_size_read(inode);
if (limit != RLIM_INFINITY) {
if (*pos >= limit) {
send_sig(SIGXFSZ, current, 0);
return -EFBIG;
}
if (*count > limit - (typeof(limit))*pos) {
*count = limit - (typeof(limit))*pos;
}
}
}
/*
* LFS rule
*/
if (unlikely(*pos + *count > MAX_NON_LFS &&
!(file->f_flags & O_LARGEFILE))) {
if (*pos >= MAX_NON_LFS) {
return -EFBIG;
}
if (*count > MAX_NON_LFS - (unsigned long)*pos) {
*count = MAX_NON_LFS - (unsigned long)*pos;
}
}
/*
* Are we about to exceed the fs block limit ?
*
* If we have written data it becomes a short write. If we have
* exceeded without writing data we send a signal and return EFBIG.
* Linus frestrict idea will clean these up nicely..
*/
if (likely(!isblk)) {
if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
if (*count || *pos > inode->i_sb->s_maxbytes) {
return -EFBIG;
}
/* zero-length writes at ->s_maxbytes are OK */
}
if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
*count = inode->i_sb->s_maxbytes - *pos;
} else {
#ifdef CONFIG_BLOCK
loff_t isize;
if (bdev_read_only(I_BDEV(inode)))
return -EPERM;
isize = i_size_read(inode);
if (*pos >= isize) {
if (*count || *pos > isize)
return -ENOSPC;
}
if (*pos + *count > isize)
*count = isize - *pos;
#else
return -EPERM;
#endif
}
return 0;
}
EXPORT_SYMBOL(generic_write_checks);
int pagecache_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
const struct address_space_operations *aops = mapping->a_ops;
return aops->write_begin(file, mapping, pos, len, flags,
pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);
int pagecache_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
const struct address_space_operations *aops = mapping->a_ops;
mark_page_accessed(page);
return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
ssize_t
generic_file_direct_write_iter(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos, loff_t *ppos, size_t count)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t written;
size_t write_len;
pgoff_t end;
if (count != iov_iter_count(iter)) {
written = iov_iter_shorten(iter, count);
if (written)
goto out;
}
write_len = count;
end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
if (written)
goto out;
/*
* After a write we want buffered reads to be sure to go to disk to get
* the new data. We invalidate clean cached page from the region we're
* about to write. We do this *before* the write so that we can return
* without clobbering -EIOCBQUEUED from ->direct_IO().
*/
if (mapping->nrpages) {
written = invalidate_inode_pages2_range(mapping,
pos >> PAGE_CACHE_SHIFT, end);
/*
* If a page can not be invalidated, return 0 to fall back
* to buffered write.
*/
if (written) {
if (written == -EBUSY)
return 0;
goto out;
}
}
written = mapping->a_ops->direct_IO(WRITE, iocb, iter, pos);
/*
* Finally, try again to invalidate clean pages which might have been
* cached by non-direct readahead, or faulted in by get_user_pages()
* if the source of the write was an mmap'ed region of the file
* we're writing. Either one is a pretty crazy thing to do,
* so we don't support it 100%. If this invalidation
* fails, tough, the write still worked...
*/
if (mapping->nrpages) {
invalidate_inode_pages2_range(mapping,
pos >> PAGE_CACHE_SHIFT, end);
}
if (written > 0) {
pos += written;
if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
i_size_write(inode, pos);
mark_inode_dirty(inode);
}
*ppos = pos;
}
out:
return written;
}
EXPORT_SYMBOL(generic_file_direct_write_iter);
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long *nr_segs, loff_t pos, loff_t *ppos,
size_t count, size_t ocount)
{
struct iov_iter iter;
ssize_t ret;
iov_iter_init(&iter, iov, *nr_segs, ocount, 0);
ret = generic_file_direct_write_iter(iocb, &iter, pos, ppos, count);
/* generic_file_direct_write_iter() might have shortened the vec */
if (*nr_segs != iter.nr_segs)
*nr_segs = iter.nr_segs;
return ret;
}
EXPORT_SYMBOL(generic_file_direct_write);
/*
* Find or create a page at the given pagecache position. Return the locked
* page. This function is specifically for buffered writes.
*/
struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index, unsigned flags)
{
int status;
gfp_t gfp_mask;
struct page *page;
gfp_t gfp_notmask = 0;
gfp_mask = mapping_gfp_mask(mapping);
if (mapping_cap_account_dirty(mapping))
gfp_mask |= __GFP_WRITE;
if (flags & AOP_FLAG_NOFS)
gfp_notmask = __GFP_FS;
repeat:
page = find_lock_page(mapping, index);
if (page)
goto found;
retry:
page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
if (!page)
return NULL;
if (is_cma_pageblock(page)) {
__free_page(page);
gfp_notmask |= __GFP_MOVABLE;
goto retry;
}
status = add_to_page_cache_lru(page, mapping, index,
GFP_KERNEL & ~gfp_notmask);
if (unlikely(status)) {
page_cache_release(page);
if (status == -EEXIST)
goto repeat;
return NULL;
}
found:
wait_on_page_writeback(page);
return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
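/*
 * Illustrative sketch, kept out of the build with #if 0: the skeleton of an
 * ->write_begin implementation built on grab_cache_page_write_begin(), in
 * the spirit of simple_write_begin() in fs/libfs.c.  A real implementation
 * must also bring partially written, not-uptodate pages uptodate; the
 * function name is hypothetical.
 */
#if 0
static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;	/* locked page, unlocked again in ->write_end */
	return 0;
}
#endif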
static ssize_t generic_perform_write(struct file *file,
struct iov_iter *i, loff_t pos)
{
struct address_space *mapping = file->f_mapping;
const struct address_space_operations *a_ops = mapping->a_ops;
long status = 0;
ssize_t written = 0;
unsigned int flags = 0;
/*
* Copies from kernel address space cannot fail (NFSD is a big user).
*/
if (segment_eq(get_fs(), KERNEL_DS))
flags |= AOP_FLAG_UNINTERRUPTIBLE;
do {
struct page *page;
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
size_t copied; /* Bytes copied from user */
void *fsdata;
offset = (pos & (PAGE_CACHE_SIZE - 1));
bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
iov_iter_count(i));
again:
/*
* Bring in the user page that we will copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
* same page as we're writing to, without it being marked
* up-to-date.
*
* Not only is this an optimisation, but it is also required
* to check that the address is actually valid, when atomic
* usercopies are used, below.
*/
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
status = -EFAULT;
break;
}
status = a_ops->write_begin(file, mapping, pos, bytes, flags,
&page, &fsdata);
if (unlikely(status))
break;
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
pagefault_disable();
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
pagefault_enable();
flush_dcache_page(page);
mark_page_accessed(page);
status = a_ops->write_end(file, mapping, pos, bytes, copied,
page, fsdata);
if (unlikely(status < 0))
break;
copied = status;
cond_resched();
iov_iter_advance(i, copied);
if (unlikely(copied == 0)) {
/*
* If we were unable to copy any data at all, we must
* fall back to a single segment length write.
*
			 * If we didn't fall back here, we could livelock
* because not all segments in the iov can be copied at
* once without a pagefault.
*/
bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
iov_iter_single_seg_count(i));
goto again;
}
pos += copied;
written += copied;
balance_dirty_pages_ratelimited(mapping);
if (fatal_signal_pending(current)) {
status = -EINTR;
break;
}
} while (iov_iter_count(i));
return written ? written : status;
}
ssize_t
generic_file_buffered_write_iter(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos, loff_t *ppos, size_t count, ssize_t written)
{
struct file *file = iocb->ki_filp;
ssize_t status;
if ((count + written) != iov_iter_count(iter)) {
int rc = iov_iter_shorten(iter, count + written);
if (rc)
return rc;
}
status = generic_perform_write(file, iter, pos);
if (likely(status >= 0)) {
written += status;
*ppos = pos + status;
}
return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write_iter);
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos, loff_t *ppos,
size_t count, ssize_t written)
{
struct iov_iter iter;
iov_iter_init(&iter, iov, nr_segs, count, written);
return generic_file_buffered_write_iter(iocb, &iter, pos, ppos,
count, written);
}
EXPORT_SYMBOL(generic_file_buffered_write);
/**
 * __generic_file_write_iter - write data to a file
* @iocb: IO state structure (file, offset, etc.)
* @iter: iov_iter specifying memory to write
* @ppos: position where to write
*
* This function does all the work needed for actually writing data to a
* file. It does all basic checks, removes SUID from the file, updates
* modification times and calls proper subroutines depending on whether we
* do direct IO or a standard buffered write.
*
* It expects i_mutex to be grabbed unless we work on a block device or similar
* object which does not need locking at all.
*
* This function does *not* take care of syncing data in case of O_SYNC write.
* A caller has to handle it. This is mainly due to the fact that we want to
* avoid syncing under i_mutex.
*/
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
loff_t *ppos)
{
struct file *file = iocb->ki_filp;
struct address_space * mapping = file->f_mapping;
size_t count; /* after file limit checks */
struct inode *inode = mapping->host;
loff_t pos;
ssize_t written;
ssize_t err;
count = iov_iter_count(iter);
pos = *ppos;
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
written = 0;
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
goto out;
if (count == 0)
goto out;
err = file_remove_suid(file);
if (err)
goto out;
err = file_update_time(file);
if (err)
goto out;
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (unlikely(file->f_flags & O_DIRECT)) {
loff_t endbyte;
ssize_t written_buffered;
written = generic_file_direct_write_iter(iocb, iter, pos,
ppos, count);
if (written < 0 || written == count)
goto out;
/*
* direct-io write to a hole: fall through to buffered I/O
* for completing the rest of the request.
*/
pos += written;
count -= written;
iov_iter_advance(iter, written);
written_buffered = generic_file_buffered_write_iter(iocb, iter,
pos, ppos, count, written);
/*
		 * If generic_file_buffered_write_iter() returned a synchronous error
* then we want to return the number of bytes which were
* direct-written, or the error code if that was zero. Note
* that this differs from normal direct-io semantics, which
* will return -EFOO even if some bytes were written.
*/
if (written_buffered < 0) {
err = written_buffered;
goto out;
}
/*
* We need to ensure that the page cache pages are written to
* disk and invalidated to preserve the expected O_DIRECT
* semantics.
*/
endbyte = pos + written_buffered - written - 1;
err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
if (err == 0) {
written = written_buffered;
invalidate_mapping_pages(mapping,
pos >> PAGE_CACHE_SHIFT,
endbyte >> PAGE_CACHE_SHIFT);
} else {
/*
* We don't know how much we wrote, so just return
* the number of bytes which were direct-written
*/
}
} else {
iter->count = count;
written = generic_file_buffered_write_iter(iocb, iter,
pos, ppos, count, written);
}
out:
current->backing_dev_info = NULL;
return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
mutex_lock(&inode->i_mutex);
ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
err = generic_write_sync(file, pos, ret);
if (err < 0 && ret > 0)
ret = err;
}
return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
ssize_t
__generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t *ppos)
{
struct iov_iter iter;
size_t count;
int ret;
count = 0;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (ret)
goto out;
iov_iter_init(&iter, iov, nr_segs, count, 0);
ret = __generic_file_write_iter(iocb, &iter, ppos);
out:
return ret;
}
EXPORT_SYMBOL(__generic_file_aio_write);
/**
* generic_file_aio_write - write data to a file
* @iocb: IO state structure
* @iov: vector with data to write
* @nr_segs: number of segments in the vector
* @pos: position in file where to write
*
* This is a wrapper around __generic_file_aio_write() to be used by most
 * filesystems. It takes care of syncing the file in the case of O_SYNC files
 * and acquires i_mutex as needed.
*/
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct blk_plug plug;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
sb_start_write(inode->i_sb);
mutex_lock(&inode->i_mutex);
blk_start_plug(&plug);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
err = generic_write_sync(file, pos, ret);
if (err < 0 && ret > 0)
ret = err;
}
blk_finish_plug(&plug);
sb_end_write(inode->i_sb);
return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
/**
* try_to_release_page() - release old fs-specific metadata on a page
*
* @page: the page which the kernel is trying to free
* @gfp_mask: memory allocation flags (and I/O mode)
*
 * The address_space is asked to try to release any data held against the page
* (presumably at page->private). If the release was successful, return `1'.
* Otherwise return zero.
*
* This may also be called if PG_fscache is set on a page, indicating that the
* page is known to the local caching routines.
*
* The @gfp_mask argument specifies whether I/O may be performed to release
* this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
*
*/
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
struct address_space * const mapping = page->mapping;
BUG_ON(!PageLocked(page));
if (PageWriteback(page))
return 0;
if (mapping && mapping->a_ops->releasepage)
return mapping->a_ops->releasepage(page, gfp_mask);
return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);
/* === end of shengdie/Dorimanx-LG-G2-D802-Kernel mm/filemap.c (C, GPL-2.0) === */
/* Map (unsigned int) keys to (source file, line, column) triples.
Copyright (C) 2001-2016 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>.
In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
what you give them. Help stamp out software-hoarding! */
#include "config.h"
#include "system.h"
#include "line-map.h"
#include "cpplib.h"
#include "internal.h"
#include "hashtab.h"
/* Do not track column numbers higher than this one. As a result, the
range of column_bits is [12, 18] (or 0 if column numbers are
disabled). */
const unsigned int LINE_MAP_MAX_COLUMN_NUMBER = (1U << 12);
/* Do not pack ranges if locations get higher than this.
If you change this, update:
gcc.dg/plugin/location_overflow_plugin.c
gcc.dg/plugin/location-overflow-test-*.c. */
const source_location LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES = 0x50000000;
/* Do not track column numbers if locations get higher than this.
If you change this, update:
gcc.dg/plugin/location_overflow_plugin.c
gcc.dg/plugin/location-overflow-test-*.c. */
const source_location LINE_MAP_MAX_LOCATION_WITH_COLS = 0x60000000;
/* Highest possible source location encoded within an ordinary or
macro map. */
const source_location LINE_MAP_MAX_SOURCE_LOCATION = 0x70000000;
static void trace_include (const struct line_maps *, const line_map_ordinary *);
static const line_map_ordinary * linemap_ordinary_map_lookup (struct line_maps *,
source_location);
static const line_map_macro* linemap_macro_map_lookup (struct line_maps *,
source_location);
static source_location linemap_macro_map_loc_to_def_point
(const line_map_macro *, source_location);
static source_location linemap_macro_map_loc_unwind_toward_spelling
(line_maps *set, const line_map_macro *, source_location);
static source_location linemap_macro_map_loc_to_exp_point
(const line_map_macro *, source_location);
static source_location linemap_macro_loc_to_spelling_point
(struct line_maps *, source_location, const line_map_ordinary **);
static source_location linemap_macro_loc_to_def_point (struct line_maps *,
source_location,
const line_map_ordinary **);
static source_location linemap_macro_loc_to_exp_point (struct line_maps *,
source_location,
const line_map_ordinary **);
/* Counters defined in macro.c. */
extern unsigned num_expanded_macros_counter;
extern unsigned num_macro_tokens_counter;
/* Hash function for location_adhoc_data hashtable. */
static hashval_t
location_adhoc_data_hash (const void *l)
{
const struct location_adhoc_data *lb =
(const struct location_adhoc_data *) l;
return ((hashval_t) lb->locus
+ (hashval_t) lb->src_range.m_start
+ (hashval_t) lb->src_range.m_finish
+ (size_t) lb->data);
}
/* Compare function for location_adhoc_data hashtable. */
static int
location_adhoc_data_eq (const void *l1, const void *l2)
{
const struct location_adhoc_data *lb1 =
(const struct location_adhoc_data *) l1;
const struct location_adhoc_data *lb2 =
(const struct location_adhoc_data *) l2;
return (lb1->locus == lb2->locus
&& lb1->src_range.m_start == lb2->src_range.m_start
&& lb1->src_range.m_finish == lb2->src_range.m_finish
&& lb1->data == lb2->data);
}
/* Update the hashtable when location_adhoc_data is reallocated. */
static int
location_adhoc_data_update (void **slot, void *data)
{
*((char **) slot) += *((long long *) data);
return 1;
}
/* Rebuild the hash table from the location adhoc data. */
void
rebuild_location_adhoc_htab (struct line_maps *set)
{
unsigned i;
set->location_adhoc_data_map.htab =
htab_create (100, location_adhoc_data_hash, location_adhoc_data_eq, NULL);
for (i = 0; i < set->location_adhoc_data_map.curr_loc; i++)
htab_find_slot (set->location_adhoc_data_map.htab,
set->location_adhoc_data_map.data + i, INSERT);
}
/* Helper function for get_combined_adhoc_loc.
Can the given LOCUS + SRC_RANGE and DATA pointer be stored compactly
within a source_location, without needing to use an ad-hoc location. */
static bool
can_be_stored_compactly_p (struct line_maps *set,
source_location locus,
source_range src_range,
void *data)
{
/* If there's an ad-hoc pointer, we can't store it directly in the
source_location, we need the lookaside. */
if (data)
return false;
/* We only store ranges that begin at the locus and that are sufficiently
"sane". */
if (src_range.m_start != locus)
return false;
if (src_range.m_finish < src_range.m_start)
return false;
if (src_range.m_start < RESERVED_LOCATION_COUNT)
return false;
if (locus >= LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES)
return false;
/* All 3 locations must be within ordinary maps, typically, the same
ordinary map. */
source_location lowest_macro_loc = LINEMAPS_MACRO_LOWEST_LOCATION (set);
if (locus >= lowest_macro_loc)
return false;
if (src_range.m_start >= lowest_macro_loc)
return false;
if (src_range.m_finish >= lowest_macro_loc)
return false;
/* Passed all tests. */
return true;
}
/* Combine LOCUS and DATA to a combined adhoc loc. */
source_location
get_combined_adhoc_loc (struct line_maps *set,
source_location locus,
source_range src_range,
void *data)
{
struct location_adhoc_data lb;
struct location_adhoc_data **slot;
if (IS_ADHOC_LOC (locus))
locus
= set->location_adhoc_data_map.data[locus & MAX_SOURCE_LOCATION].locus;
if (locus == 0 && data == NULL)
return 0;
/* Any ordinary locations ought to be "pure" at this point: no
compressed ranges. */
linemap_assert (locus < RESERVED_LOCATION_COUNT
|| locus >= LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES
|| locus >= LINEMAPS_MACRO_LOWEST_LOCATION (set)
|| pure_location_p (set, locus));
/* Consider short-range optimization. */
if (can_be_stored_compactly_p (set, locus, src_range, data))
{
/* The low bits ought to be clear. */
linemap_assert (pure_location_p (set, locus));
const line_map *map = linemap_lookup (set, locus);
const line_map_ordinary *ordmap = linemap_check_ordinary (map);
unsigned int int_diff = src_range.m_finish - src_range.m_start;
unsigned int col_diff = (int_diff >> ordmap->m_range_bits);
if (col_diff < (1U << ordmap->m_range_bits))
{
source_location packed = locus | col_diff;
set->num_optimized_ranges++;
return packed;
}
}
/* We can also compactly store locations
when locus == start == finish (and data is NULL). */
if (locus == src_range.m_start
&& locus == src_range.m_finish
&& !data)
return locus;
if (!data)
set->num_unoptimized_ranges++;
lb.locus = locus;
lb.src_range = src_range;
lb.data = data;
slot = (struct location_adhoc_data **)
htab_find_slot (set->location_adhoc_data_map.htab, &lb, INSERT);
if (*slot == NULL)
{
if (set->location_adhoc_data_map.curr_loc >=
set->location_adhoc_data_map.allocated)
{
char *orig_data = (char *) set->location_adhoc_data_map.data;
long long offset;
/* Cast away extern "C" from the type of xrealloc. */
line_map_realloc reallocator = (set->reallocator
? set->reallocator
: (line_map_realloc) xrealloc);
if (set->location_adhoc_data_map.allocated == 0)
set->location_adhoc_data_map.allocated = 128;
else
set->location_adhoc_data_map.allocated *= 2;
set->location_adhoc_data_map.data = (struct location_adhoc_data *)
reallocator (set->location_adhoc_data_map.data,
set->location_adhoc_data_map.allocated
* sizeof (struct location_adhoc_data));
offset = (char *) (set->location_adhoc_data_map.data) - orig_data;
if (set->location_adhoc_data_map.allocated > 128)
htab_traverse (set->location_adhoc_data_map.htab,
location_adhoc_data_update, &offset);
}
*slot = set->location_adhoc_data_map.data
+ set->location_adhoc_data_map.curr_loc;
set->location_adhoc_data_map.data[set->location_adhoc_data_map.curr_loc++]
= lb;
}
return ((*slot) - set->location_adhoc_data_map.data) | 0x80000000;
}
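/* Illustrative sketch, kept out of the build with #if 0: how a client
   attaches a range (and optionally a data pointer such as a BLOCK) to an
   existing location.  Front ends normally reach this through the
   COMBINE_LOCATION_DATA macro in line-map.h; the function name below is
   hypothetical.  */
#if 0
static source_location
example_widen_location (struct line_maps *set, source_location caret,
			source_location start, source_location finish)
{
  source_range range;
  range.m_start = start;
  range.m_finish = finish;

  /* The result either packs the range into the location itself or records
     it in the ad-hoc lookaside table, as decided above.  */
  return get_combined_adhoc_loc (set, caret, range, NULL);
}
#endif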
/* Return the data for the adhoc loc. */
void *
get_data_from_adhoc_loc (struct line_maps *set, source_location loc)
{
linemap_assert (IS_ADHOC_LOC (loc));
return set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].data;
}
/* Return the location for the adhoc loc. */
source_location
get_location_from_adhoc_loc (struct line_maps *set, source_location loc)
{
linemap_assert (IS_ADHOC_LOC (loc));
return set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
}
/* Return the source_range for adhoc location LOC. */
static source_range
get_range_from_adhoc_loc (struct line_maps *set, source_location loc)
{
linemap_assert (IS_ADHOC_LOC (loc));
return set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].src_range;
}
/* Get the source_range of location LOC, either from the ad-hoc
lookaside table, or embedded inside LOC itself. */
source_range
get_range_from_loc (struct line_maps *set,
source_location loc)
{
if (IS_ADHOC_LOC (loc))
return get_range_from_adhoc_loc (set, loc);
/* For ordinary maps, extract packed range. */
if (loc >= RESERVED_LOCATION_COUNT
&& loc < LINEMAPS_MACRO_LOWEST_LOCATION (set)
&& loc <= LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES)
{
const line_map *map = linemap_lookup (set, loc);
const line_map_ordinary *ordmap = linemap_check_ordinary (map);
source_range result;
int offset = loc & ((1 << ordmap->m_range_bits) - 1);
result.m_start = loc - offset;
result.m_finish = result.m_start + (offset << ordmap->m_range_bits);
return result;
}
return source_range::from_location (loc);
}
/* Get whether location LOC is a "pure" location, or
whether it is an ad-hoc location, or embeds range information. */
bool
pure_location_p (line_maps *set, source_location loc)
{
if (IS_ADHOC_LOC (loc))
return false;
const line_map *map = linemap_lookup (set, loc);
const line_map_ordinary *ordmap = linemap_check_ordinary (map);
if (loc & ((1U << ordmap->m_range_bits) - 1))
return false;
return true;
}
/* Finalize the location_adhoc_data structure. */
void
location_adhoc_data_fini (struct line_maps *set)
{
htab_delete (set->location_adhoc_data_map.htab);
}
/* Initialize a line map set. */
void
linemap_init (struct line_maps *set,
source_location builtin_location)
{
memset (set, 0, sizeof (struct line_maps));
set->highest_location = RESERVED_LOCATION_COUNT - 1;
set->highest_line = RESERVED_LOCATION_COUNT - 1;
set->location_adhoc_data_map.htab =
htab_create (100, location_adhoc_data_hash, location_adhoc_data_eq, NULL);
set->builtin_location = builtin_location;
}
/* Check for and warn about line_maps entered but not exited. */
void
linemap_check_files_exited (struct line_maps *set)
{
const line_map_ordinary *map;
/* Depending upon whether we are handling preprocessed input or
not, this can be a user error or an ICE. */
for (map = LINEMAPS_LAST_ORDINARY_MAP (set);
! MAIN_FILE_P (map);
map = INCLUDED_FROM (set, map))
fprintf (stderr, "line-map.c: file \"%s\" entered but not left\n",
ORDINARY_MAP_FILE_NAME (map));
}
/* Create a new line map in the line map set SET, and return it.
REASON is the reason of creating the map. It determines the type
of map created (ordinary or macro map). Note that ordinary maps and
macro maps are allocated in different memory locations. */
static struct line_map *
new_linemap (struct line_maps *set,
enum lc_reason reason)
{
/* Depending on this variable, a macro map would be allocated in a
different memory location than an ordinary map. */
bool macro_map_p = (reason == LC_ENTER_MACRO);
struct line_map *result;
if (LINEMAPS_USED (set, macro_map_p) == LINEMAPS_ALLOCATED (set, macro_map_p))
{
/* We ran out of allocated line maps. Let's allocate more. */
unsigned alloc_size;
/* Cast away extern "C" from the type of xrealloc. */
line_map_realloc reallocator = (set->reallocator
? set->reallocator
: (line_map_realloc) xrealloc);
line_map_round_alloc_size_func round_alloc_size =
set->round_alloc_size;
size_t map_size = (macro_map_p
? sizeof (line_map_macro)
: sizeof (line_map_ordinary));
/* We are going to execute some dance to try to reduce the
overhead of the memory allocator, in case we are using the
ggc-page.c one.
The actual size of memory we are going to get back from the
allocator is the smallest power of 2 that is greater than the
size we requested. So let's consider that size then. */
alloc_size =
(2 * LINEMAPS_ALLOCATED (set, macro_map_p) + 256)
* map_size;
/* Get the actual size of memory that is going to be allocated
by the allocator. */
alloc_size = round_alloc_size (alloc_size);
/* Now alloc_size contains the exact memory size we would get if
we had asked for the initial alloc_size amount of memory.
Let's convert that back to the number of maps it amounts
to. */
LINEMAPS_ALLOCATED (set, macro_map_p) =
alloc_size / map_size;
/* And now let's really do the re-allocation. */
if (macro_map_p)
{
set->info_macro.maps
= (line_map_macro *) (*reallocator) (set->info_macro.maps,
(LINEMAPS_ALLOCATED (set, macro_map_p)
* map_size));
result = &set->info_macro.maps[LINEMAPS_USED (set, macro_map_p)];
}
else
{
set->info_ordinary.maps =
(line_map_ordinary *) (*reallocator) (set->info_ordinary.maps,
(LINEMAPS_ALLOCATED (set, macro_map_p)
* map_size));
result = &set->info_ordinary.maps[LINEMAPS_USED (set, macro_map_p)];
}
memset (result, 0,
((LINEMAPS_ALLOCATED (set, macro_map_p)
- LINEMAPS_USED (set, macro_map_p))
* map_size));
}
else
{
if (macro_map_p)
result = &set->info_macro.maps[LINEMAPS_USED (set, macro_map_p)];
else
result = &set->info_ordinary.maps[LINEMAPS_USED (set, macro_map_p)];
}
LINEMAPS_USED (set, macro_map_p)++;
result->reason = reason;
return result;
}
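/* Worked example of the allocation sizing above (numbers are purely
   illustrative).  Suppose LINEMAPS_ALLOCATED is currently 256, map_size
   is 48 bytes, and round_alloc_size rounds up to a power of 2 the way
   ggc-page does:
     requested = (2 * 256 + 256) * 48     = 36864 bytes
     rounded   = round_alloc_size (36864) = 65536 bytes
     new LINEMAPS_ALLOCATED = 65536 / 48  = 1365 maps
   i.e. the table grows to however many maps actually fit in the block
   the allocator hands back, rather than wasting the slack. */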
/* Add a mapping of logical source line to physical source file and
line number.
The text pointed to by TO_FILE must have a lifetime
at least as long as the final call to lookup_line (). An empty
TO_FILE means standard input. If reason is LC_LEAVE, and
TO_FILE is NULL, then TO_FILE, TO_LINE and SYSP are given their
natural values considering the file we are returning to.
FROM_LINE should be monotonically increasing across calls to this
function. A call to this function can relocate the previous set of
maps, so any stored line_map pointers should not be used. */
const struct line_map *
linemap_add (struct line_maps *set, enum lc_reason reason,
unsigned int sysp, const char *to_file, linenum_type to_line)
{
/* Generate a start_location above the current highest_location.
If possible, make the low range bits be zero. */
source_location start_location;
if (set->highest_location < LINE_MAP_MAX_LOCATION_WITH_COLS)
{
start_location = set->highest_location + (1 << set->default_range_bits);
if (set->default_range_bits)
start_location &= ~((1 << set->default_range_bits) - 1);
linemap_assert (0 == (start_location
& ((1 << set->default_range_bits) - 1)));
}
else
start_location = set->highest_location + 1;
linemap_assert (!(LINEMAPS_ORDINARY_USED (set)
&& (start_location
< MAP_START_LOCATION (LINEMAPS_LAST_ORDINARY_MAP (set)))));
/* When we enter the file for the first time reason cannot be
LC_RENAME. */
linemap_assert (!(set->depth == 0 && reason == LC_RENAME));
/* If we are leaving the main file, return a NULL map. */
if (reason == LC_LEAVE
&& MAIN_FILE_P (LINEMAPS_LAST_ORDINARY_MAP (set))
&& to_file == NULL)
{
set->depth--;
return NULL;
}
linemap_assert (reason != LC_ENTER_MACRO);
line_map_ordinary *map = linemap_check_ordinary (new_linemap (set, reason));
if (to_file && *to_file == '\0' && reason != LC_RENAME_VERBATIM)
to_file = "<stdin>";
if (reason == LC_RENAME_VERBATIM)
reason = LC_RENAME;
if (reason == LC_LEAVE)
{
/* When we are just leaving an "included" file, and jump to the next
location inside the "includer" right after the #include
"included", this variable points the map in use right before the
#include "included", inside the same "includer" file. */
line_map_ordinary *from;
bool error;
if (MAIN_FILE_P (map - 1))
{
/* So this _should_ mean we are leaving the main file --
effectively ending the compilation unit. But to_file not
being NULL means the caller thinks we are leaving to
another file. This is an erroneous behaviour but we'll
try to recover from it. Let's pretend we are not leaving
the main file. */
error = true;
reason = LC_RENAME;
from = map - 1;
}
else
{
/* (MAP - 1) points to the map we are leaving. The
map from which (MAP - 1) got included should be the map
that comes right before MAP in the same file. */
from = INCLUDED_FROM (set, map - 1);
error = to_file && filename_cmp (ORDINARY_MAP_FILE_NAME (from),
to_file);
}
/* Depending upon whether we are handling preprocessed input or
not, this can be a user error or an ICE. */
if (error)
fprintf (stderr, "line-map.c: file \"%s\" left but not entered\n",
to_file);
/* A TO_FILE of NULL is special - we use the natural values. */
if (error || to_file == NULL)
{
to_file = ORDINARY_MAP_FILE_NAME (from);
to_line = SOURCE_LINE (from, from[1].start_location);
sysp = ORDINARY_MAP_IN_SYSTEM_HEADER_P (from);
}
}
map->sysp = sysp;
map->start_location = start_location;
map->to_file = to_file;
map->to_line = to_line;
LINEMAPS_ORDINARY_CACHE (set) = LINEMAPS_ORDINARY_USED (set) - 1;
map->m_column_and_range_bits = 0;
map->m_range_bits = 0;
set->highest_location = start_location;
set->highest_line = start_location;
set->max_column_hint = 0;
/* This assertion is placed after set->highest_location has
been updated, since the latter affects
linemap_location_from_macro_expansion_p, which ultimately affects
pure_location_p. */
linemap_assert (pure_location_p (set, start_location));
if (reason == LC_ENTER)
{
map->included_from =
set->depth == 0 ? -1 : (int) (LINEMAPS_ORDINARY_USED (set) - 2);
set->depth++;
if (set->trace_includes)
trace_include (set, map);
}
else if (reason == LC_RENAME)
map->included_from = ORDINARY_MAP_INCLUDER_FILE_INDEX (&map[-1]);
else if (reason == LC_LEAVE)
{
set->depth--;
map->included_from =
ORDINARY_MAP_INCLUDER_FILE_INDEX (INCLUDED_FROM (set, map - 1));
}
return map;
}
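/* Usage sketch (illustrative only, not part of this file).  A client
   such as a preprocessor typically brackets each include with LC_ENTER
   and LC_LEAVE:
     linemap_add (set, LC_ENTER, 0, "main.c", 1);
     ... tokens of main.c get locations ...
     linemap_add (set, LC_ENTER, 0, "header.h", 1);
     ... tokens of header.h get locations ...
     linemap_add (set, LC_LEAVE, 0, NULL, 0);
     ... back in main.c; TO_FILE, TO_LINE and SYSP were recomputed ...
   Because each call may reallocate the map arrays, line_map pointers
   obtained before a call must not be reused afterwards. */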
/* Returns TRUE if the line table set tracks token locations across
macro expansion, FALSE otherwise. */
bool
linemap_tracks_macro_expansion_locs_p (struct line_maps *set)
{
return LINEMAPS_MACRO_MAPS (set) != NULL;
}
/* Create a macro map. A macro map encodes source locations of tokens
that are part of a macro replacement-list, at a macro expansion
point. See the extensive comments of struct line_map and struct
line_map_macro, in line-map.h.
This map shall be created when the macro is expanded. The map
encodes the source location of the expansion point of the macro as
well as the "original" source location of each token that is part
of the macro replacement-list. If a macro is defined but never
expanded, it has no macro map. SET is the set of maps the macro
map should be part of. MACRO_NODE is the macro which the new macro
map should encode source locations for. EXPANSION is the location
of the expansion point of MACRO. For function-like macros
invocations, it's best to make it point to the closing parenthesis
of the macro, rather than the location of the first character
of the macro. NUM_TOKENS is the number of tokens that are part of
the replacement-list of MACRO.
Note that when we run out of the integer space available for source
locations, this function returns NULL. In that case, callers of
this function cannot encode {line,column} pairs into locations of
macro tokens anymore. */
const line_map_macro *
linemap_enter_macro (struct line_maps *set, struct cpp_hashnode *macro_node,
source_location expansion, unsigned int num_tokens)
{
line_map_macro *map;
source_location start_location;
/* Cast away extern "C" from the type of xrealloc. */
line_map_realloc reallocator = (set->reallocator
? set->reallocator
: (line_map_realloc) xrealloc);
start_location = LINEMAPS_MACRO_LOWEST_LOCATION (set) - num_tokens;
if (start_location <= set->highest_line
|| start_location > LINEMAPS_MACRO_LOWEST_LOCATION (set))
/* We ran out of macro map space. */
return NULL;
map = linemap_check_macro (new_linemap (set, LC_ENTER_MACRO));
map->start_location = start_location;
map->macro = macro_node;
map->n_tokens = num_tokens;
map->macro_locations
= (source_location*) reallocator (NULL,
2 * num_tokens
* sizeof (source_location));
map->expansion = expansion;
memset (MACRO_MAP_LOCATIONS (map), 0,
num_tokens * sizeof (source_location));
LINEMAPS_MACRO_CACHE (set) = LINEMAPS_MACRO_USED (set) - 1;
return map;
}
/* Create and return a virtual location for a token that is part of a
macro expansion-list at a macro expansion point. See the comment
inside struct line_map_macro to see what an expansion-list exactly
is.
A call to this function must come after a call to
linemap_enter_macro.
MAP is the map into which the source location is created. TOKEN_NO
is the index of the token in the macro replacement-list, starting
at number 0.
ORIG_LOC is the location of the token outside of this macro
expansion. If the token comes originally from the macro
definition, it is the locus in the macro definition; otherwise it
is a location in the context of the caller of this macro expansion
(a virtual location if the caller is itself a macro expansion, or an
ordinary source location otherwise).
ORIG_PARM_REPLACEMENT_LOC is the location in the macro definition,
either of the token itself or of a macro parameter that it
replaces. */
source_location
linemap_add_macro_token (const line_map_macro *map,
unsigned int token_no,
source_location orig_loc,
source_location orig_parm_replacement_loc)
{
source_location result;
linemap_assert (linemap_macro_expansion_map_p (map));
linemap_assert (token_no < MACRO_MAP_NUM_MACRO_TOKENS (map));
MACRO_MAP_LOCATIONS (map)[2 * token_no] = orig_loc;
MACRO_MAP_LOCATIONS (map)[2 * token_no + 1] = orig_parm_replacement_loc;
result = MAP_START_LOCATION (map) + token_no;
return result;
}
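/* Usage sketch (illustrative only; the arrays below are made up).  When
   a macro M with three replacement-list tokens is expanded, a client
   would do roughly:
     const line_map_macro *mm
       = linemap_enter_macro (set, macro_node_for_M, expansion_loc, 3);
     if (mm)
       for (i = 0; i < 3; i++)
         virt_loc[i] = linemap_add_macro_token (mm, i, orig_loc[i],
                                                parm_replacement_loc[i]);
   A NULL return from linemap_enter_macro means the macro-map location
   space is exhausted, so no virtual locations can be encoded for these
   tokens. */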
/* Return a source_location for the start (i.e. column==0) of
(physical) line TO_LINE in the current source file (as in the
most recent linemap_add). MAX_COLUMN_HINT is the highest column
number we expect to use in this line (but it does not change
the highest_location). */
source_location
linemap_line_start (struct line_maps *set, linenum_type to_line,
unsigned int max_column_hint)
{
line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (set);
source_location highest = set->highest_location;
source_location r;
linenum_type last_line =
SOURCE_LINE (map, set->highest_line);
int line_delta = to_line - last_line;
bool add_map = false;
linemap_assert (map->m_column_and_range_bits >= map->m_range_bits);
int effective_column_bits = map->m_column_and_range_bits - map->m_range_bits;
if (line_delta < 0
|| (line_delta > 10
&& line_delta * map->m_column_and_range_bits > 1000)
|| (max_column_hint >= (1U << effective_column_bits))
|| (max_column_hint <= 80 && effective_column_bits >= 10)
|| (highest > LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES
&& map->m_range_bits > 0)
|| (highest > LINE_MAP_MAX_LOCATION_WITH_COLS
&& (set->max_column_hint || highest >= LINE_MAP_MAX_SOURCE_LOCATION)))
add_map = true;
else
max_column_hint = set->max_column_hint;
if (add_map)
{
int column_bits;
int range_bits;
if (max_column_hint > LINE_MAP_MAX_COLUMN_NUMBER
|| highest > LINE_MAP_MAX_LOCATION_WITH_COLS)
{
/* If the column number is ridiculous or we've allocated a huge
number of source_locations, give up on column numbers
(and on packed ranges). */
max_column_hint = 0;
column_bits = 0;
range_bits = 0;
if (highest > LINE_MAP_MAX_SOURCE_LOCATION)
return 0;
}
else
{
column_bits = 7;
if (highest <= LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES)
range_bits = set->default_range_bits;
else
range_bits = 0;
while (max_column_hint >= (1U << column_bits))
column_bits++;
max_column_hint = 1U << column_bits;
column_bits += range_bits;
}
/* Allocate the new line_map. However, if the current map only has a
single line we can sometimes just increase its column_bits instead. */
if (line_delta < 0
|| last_line != ORDINARY_MAP_STARTING_LINE_NUMBER (map)
|| SOURCE_COLUMN (map, highest) >= (1U << column_bits)
|| range_bits < map->m_range_bits)
map = linemap_check_ordinary
(const_cast <line_map *>
(linemap_add (set, LC_RENAME,
ORDINARY_MAP_IN_SYSTEM_HEADER_P (map),
ORDINARY_MAP_FILE_NAME (map),
to_line)));
map->m_column_and_range_bits = column_bits;
map->m_range_bits = range_bits;
r = (MAP_START_LOCATION (map)
+ ((to_line - ORDINARY_MAP_STARTING_LINE_NUMBER (map))
<< column_bits));
}
else
r = set->highest_line + (line_delta << map->m_column_and_range_bits);
/* Locations of ordinary tokens are always lower than locations of
macro tokens. */
if (r >= LINEMAPS_MACRO_LOWEST_LOCATION (set))
return 0;
set->highest_line = r;
if (r > set->highest_location)
set->highest_location = r;
set->max_column_hint = max_column_hint;
/* At this point, we expect one of:
(a) the normal case: a "pure" location with 0 range bits, or
(b) we've gone past LINE_MAP_MAX_LOCATION_WITH_COLS so can't track
columns anymore (or ranges), or
(c) we're in a region with a column hint exceeding
LINE_MAP_MAX_COLUMN_NUMBER, so column-tracking is off,
with column_bits == 0. */
linemap_assert (pure_location_p (set, r)
|| r >= LINE_MAP_MAX_LOCATION_WITH_COLS
|| map->m_column_and_range_bits == 0);
linemap_assert (SOURCE_LINE (map, r) == to_line);
return r;
}
/* Encode and return a source_location from a column number. The
source line considered is the last source line used to call
linemap_line_start, i.e., the last source line from which a location
was encoded. */
source_location
linemap_position_for_column (struct line_maps *set, unsigned int to_column)
{
source_location r = set->highest_line;
linemap_assert
(!linemap_macro_expansion_map_p (LINEMAPS_LAST_ORDINARY_MAP (set)));
if (to_column >= set->max_column_hint)
{
if (r > LINE_MAP_MAX_LOCATION_WITH_COLS
|| to_column > LINE_MAP_MAX_COLUMN_NUMBER)
{
/* Running low on source_locations - disable column numbers. */
return r;
}
else
{
line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (set);
r = linemap_line_start (set, SOURCE_LINE (map, r), to_column + 50);
}
}
line_map_ordinary *map = LINEMAPS_LAST_ORDINARY_MAP (set);
r = r + (to_column << map->m_range_bits);
if (r >= set->highest_location)
set->highest_location = r;
return r;
}
/* Encode and return a source location from a given line and
column. */
source_location
linemap_position_for_line_and_column (line_maps *set,
const line_map_ordinary *ord_map,
linenum_type line,
unsigned column)
{
linemap_assert (ORDINARY_MAP_STARTING_LINE_NUMBER (ord_map) <= line);
source_location r = MAP_START_LOCATION (ord_map);
r += ((line - ORDINARY_MAP_STARTING_LINE_NUMBER (ord_map))
<< ord_map->m_column_and_range_bits);
if (r <= LINE_MAP_MAX_LOCATION_WITH_COLS)
r += ((column & ((1 << ord_map->m_column_and_range_bits) - 1))
<< ord_map->m_range_bits);
source_location upper_limit = LINEMAPS_MACRO_LOWEST_LOCATION (set);
if (r >= upper_limit)
r = upper_limit - 1;
if (r > set->highest_location)
set->highest_location = r;
return r;
}
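/* Worked example of the encoding above (figures are illustrative).  For
   an ordinary map with start_location 10000, starting line 7,
   m_column_and_range_bits == 12 and m_range_bits == 5, line 9 column 40
   is encoded as:
     r = 10000
         + ((9 - 7) << 12)          line offset  ->  8192
         + ((40 & 0xfff) << 5)      column       ->  1280
       = 19472
   Lines occupy the high bits, columns the middle bits, and the low
   m_range_bits are left clear so a short range can later be packed into
   them. */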
/* Encode and return a source_location starting from location LOC and
shifting it by OFFSET columns. This function does not support
virtual locations. */
source_location
linemap_position_for_loc_and_offset (struct line_maps *set,
source_location loc,
unsigned int offset)
{
const line_map_ordinary * map = NULL;
if (IS_ADHOC_LOC (loc))
loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
/* This function does not support virtual locations yet. */
if (linemap_assert_fails
(!linemap_location_from_macro_expansion_p (set, loc)))
return loc;
if (offset == 0
/* Adding an offset to a reserved location (like
UNKNOWN_LOCATION for the C/C++ FEs) does not really make
sense. So let's leave the location intact in that case. */
|| loc < RESERVED_LOCATION_COUNT)
return loc;
/* We find the real location and shift it. */
loc = linemap_resolve_location (set, loc, LRK_SPELLING_LOCATION, &map);
/* The new location (loc + offset) should be higher than the first
location encoded by MAP. This can fail if the line information
is messed up because of line directives (see PR66415). */
if (MAP_START_LOCATION (map) >= loc + offset)
return loc;
linenum_type line = SOURCE_LINE (map, loc);
unsigned int column = SOURCE_COLUMN (map, loc);
/* If MAP is not the last line map of its set, then the new location
(loc + offset) should be less than the first location encoded by
the next line map of the set. Otherwise, we try to encode the
location in the next map. */
while (map != LINEMAPS_LAST_ORDINARY_MAP (set)
&& loc + offset >= MAP_START_LOCATION (&map[1]))
{
map = &map[1];
/* If the next map starts in a higher line, we cannot encode the
location there. */
if (line < ORDINARY_MAP_STARTING_LINE_NUMBER (map))
return loc;
}
offset += column;
if (linemap_assert_fails (offset < (1u << map->m_column_and_range_bits)))
return loc;
source_location r =
linemap_position_for_line_and_column (set, map, line, offset);
if (linemap_assert_fails (r <= set->highest_location)
|| linemap_assert_fails (map == linemap_lookup (set, r)))
return loc;
return r;
}
/* Given a virtual source location yielded by a map (either an
ordinary or a macro map), returns that map. */
const struct line_map*
linemap_lookup (struct line_maps *set, source_location line)
{
if (IS_ADHOC_LOC (line))
line = set->location_adhoc_data_map.data[line & MAX_SOURCE_LOCATION].locus;
if (linemap_location_from_macro_expansion_p (set, line))
return linemap_macro_map_lookup (set, line);
return linemap_ordinary_map_lookup (set, line);
}
/* Given a source location yielded by an ordinary map, returns that
map. Since the set is built chronologically, the logical lines are
monotonically increasing, and so the list is sorted and we can use a
binary search. */
static const line_map_ordinary *
linemap_ordinary_map_lookup (struct line_maps *set, source_location line)
{
unsigned int md, mn, mx;
const line_map_ordinary *cached, *result;
if (IS_ADHOC_LOC (line))
line = set->location_adhoc_data_map.data[line & MAX_SOURCE_LOCATION].locus;
if (set == NULL || line < RESERVED_LOCATION_COUNT)
return NULL;
mn = LINEMAPS_ORDINARY_CACHE (set);
mx = LINEMAPS_ORDINARY_USED (set);
cached = LINEMAPS_ORDINARY_MAP_AT (set, mn);
/* We should get a segfault if no line_maps have been added yet. */
if (line >= MAP_START_LOCATION (cached))
{
if (mn + 1 == mx || line < MAP_START_LOCATION (&cached[1]))
return cached;
}
else
{
mx = mn;
mn = 0;
}
while (mx - mn > 1)
{
md = (mn + mx) / 2;
if (MAP_START_LOCATION (LINEMAPS_ORDINARY_MAP_AT (set, md)) > line)
mx = md;
else
mn = md;
}
LINEMAPS_ORDINARY_CACHE (set) = mn;
result = LINEMAPS_ORDINARY_MAP_AT (set, mn);
linemap_assert (line >= MAP_START_LOCATION (result));
return result;
}
/* Given a source location yielded by a macro map, returns that map.
Since the set is built chronologically, the logical lines are
monotonically decreasing, and so the list is sorted and we can use a
binary search. */
static const line_map_macro *
linemap_macro_map_lookup (struct line_maps *set, source_location line)
{
unsigned int md, mn, mx;
const struct line_map_macro *cached, *result;
if (IS_ADHOC_LOC (line))
line = set->location_adhoc_data_map.data[line & MAX_SOURCE_LOCATION].locus;
linemap_assert (line >= LINEMAPS_MACRO_LOWEST_LOCATION (set));
if (set == NULL)
return NULL;
mn = LINEMAPS_MACRO_CACHE (set);
mx = LINEMAPS_MACRO_USED (set);
cached = LINEMAPS_MACRO_MAP_AT (set, mn);
if (line >= MAP_START_LOCATION (cached))
{
if (mn == 0 || line < MAP_START_LOCATION (&cached[-1]))
return cached;
mx = mn - 1;
mn = 0;
}
while (mn < mx)
{
md = (mx + mn) / 2;
if (MAP_START_LOCATION (LINEMAPS_MACRO_MAP_AT (set, md)) > line)
mn = md + 1;
else
mx = md;
}
LINEMAPS_MACRO_CACHE (set) = mx;
result = LINEMAPS_MACRO_MAP_AT (set, LINEMAPS_MACRO_CACHE (set));
linemap_assert (MAP_START_LOCATION (result) <= line);
return result;
}
/* Return TRUE if MAP encodes locations coming from a macro
replacement-list at macro expansion point. */
bool
linemap_macro_expansion_map_p (const struct line_map *map)
{
if (!map)
return false;
return (map->reason == LC_ENTER_MACRO);
}
/* If LOCATION is the locus of a token in a replacement-list of a
macro expansion, return the location of the macro expansion point.
Read the comments of struct line_map and struct line_map_macro in
line-map.h to understand what a macro expansion point is. */
static source_location
linemap_macro_map_loc_to_exp_point (const line_map_macro *map,
source_location location ATTRIBUTE_UNUSED)
{
linemap_assert (linemap_macro_expansion_map_p (map)
&& location >= MAP_START_LOCATION (map));
/* Make sure LOCATION is correct. */
linemap_assert ((location - MAP_START_LOCATION (map))
< MACRO_MAP_NUM_MACRO_TOKENS (map));
return MACRO_MAP_EXPANSION_POINT_LOCATION (map);
}
/* LOCATION is the source location of a token that belongs to a macro
replacement-list as part of the macro expansion denoted by MAP.
Return the location of the token at the definition point of the
macro. */
static source_location
linemap_macro_map_loc_to_def_point (const line_map_macro *map,
source_location location)
{
unsigned token_no;
linemap_assert (linemap_macro_expansion_map_p (map)
&& location >= MAP_START_LOCATION (map));
linemap_assert (location >= RESERVED_LOCATION_COUNT);
token_no = location - MAP_START_LOCATION (map);
linemap_assert (token_no < MACRO_MAP_NUM_MACRO_TOKENS (map));
location = MACRO_MAP_LOCATIONS (map)[2 * token_no + 1];
return location;
}
/* If LOCATION is the locus of a token that is an argument of a
function-like macro M and appears in the expansion of M, return the
locus of that argument in the context of the caller of M.
In other words, this returns the xI location presented in the
comments of line_map_macro above. */
source_location
linemap_macro_map_loc_unwind_toward_spelling (line_maps *set,
const line_map_macro* map,
source_location location)
{
unsigned token_no;
if (IS_ADHOC_LOC (location))
location = get_location_from_adhoc_loc (set, location);
linemap_assert (linemap_macro_expansion_map_p (map)
&& location >= MAP_START_LOCATION (map));
linemap_assert (location >= RESERVED_LOCATION_COUNT);
linemap_assert (!IS_ADHOC_LOC (location));
token_no = location - MAP_START_LOCATION (map);
linemap_assert (token_no < MACRO_MAP_NUM_MACRO_TOKENS (map));
location = MACRO_MAP_LOCATIONS (map)[2 * token_no];
return location;
}
/* Return the source line number corresponding to source location
LOCATION. SET is the line map set LOCATION comes from. If
LOCATION is the source location of a token that is part of the
replacement-list of a macro expansion, return the line number of the
macro expansion point. */
int
linemap_get_expansion_line (struct line_maps *set,
source_location location)
{
const line_map_ordinary *map = NULL;
if (IS_ADHOC_LOC (location))
location = set->location_adhoc_data_map.data[location
& MAX_SOURCE_LOCATION].locus;
if (location < RESERVED_LOCATION_COUNT)
return 0;
location =
linemap_macro_loc_to_exp_point (set, location, &map);
return SOURCE_LINE (map, location);
}
/* Return the path of the file corresponding to source code location
LOCATION.
If LOCATION is the source location of a token that is part of the
replacement-list of a macro expansion, return the file path of the
macro expansion point.
SET is the line map set LOCATION comes from. */
const char*
linemap_get_expansion_filename (struct line_maps *set,
source_location location)
{
const struct line_map_ordinary *map = NULL;
if (IS_ADHOC_LOC (location))
location = set->location_adhoc_data_map.data[location
& MAX_SOURCE_LOCATION].locus;
if (location < RESERVED_LOCATION_COUNT)
return NULL;
location =
linemap_macro_loc_to_exp_point (set, location, &map);
return LINEMAP_FILE (map);
}
/* Return the name of the macro associated to MACRO_MAP. */
const char*
linemap_map_get_macro_name (const line_map_macro *macro_map)
{
linemap_assert (macro_map && linemap_macro_expansion_map_p (macro_map));
return (const char*) NODE_NAME (MACRO_MAP_MACRO (macro_map));
}
/* Return a positive value if LOCATION is the locus of a token that is
located in a system header, 0 otherwise. It returns 1 if LOCATION
is the locus of a token that is located in a system header, and 2
if LOCATION is the locus of a token located in a C system header
that therefore needs to be extern "C" protected in C++.
Note that this function returns 1 if LOCATION belongs to a token
that is part of a macro replacement-list defined in a system
header, but expanded in a non-system file. */
int
linemap_location_in_system_header_p (struct line_maps *set,
source_location location)
{
const struct line_map *map = NULL;
if (IS_ADHOC_LOC (location))
location = set->location_adhoc_data_map.data[location
& MAX_SOURCE_LOCATION].locus;
if (location < RESERVED_LOCATION_COUNT)
return false;
/* Let's look at where the token for LOCATION comes from. */
while (true)
{
map = linemap_lookup (set, location);
if (map != NULL)
{
if (!linemap_macro_expansion_map_p (map))
/* It's a normal token. */
return LINEMAP_SYSP (linemap_check_ordinary (map));
else
{
const line_map_macro *macro_map = linemap_check_macro (map);
/* It's a token resulting from a macro expansion. */
source_location loc =
linemap_macro_map_loc_unwind_toward_spelling (set, macro_map, location);
if (loc < RESERVED_LOCATION_COUNT)
/* This token might come from a built-in macro. Let's
look at where that macro got expanded. */
location = linemap_macro_map_loc_to_exp_point (macro_map, location);
else
location = loc;
}
}
else
break;
}
return false;
}
/* Return TRUE if LOCATION is a source code location of a token coming
from a macro replacement-list at a macro expansion point, FALSE
otherwise. */
bool
linemap_location_from_macro_expansion_p (const struct line_maps *set,
source_location location)
{
if (IS_ADHOC_LOC (location))
location = set->location_adhoc_data_map.data[location
& MAX_SOURCE_LOCATION].locus;
linemap_assert (location <= MAX_SOURCE_LOCATION
&& (set->highest_location
< LINEMAPS_MACRO_LOWEST_LOCATION (set)));
if (set == NULL)
return false;
return (location > set->highest_location);
}
/* Given two virtual locations *LOC0 and *LOC1, return the first
common macro map in their macro expansion histories. Return NULL
if no common macro was found. *LOC0 (resp. *LOC1) is set to the
virtual location of the token inside the resulting macro. */
static const struct line_map*
first_map_in_common_1 (struct line_maps *set,
source_location *loc0,
source_location *loc1)
{
source_location l0 = *loc0, l1 = *loc1;
const struct line_map *map0 = linemap_lookup (set, l0),
*map1 = linemap_lookup (set, l1);
while (linemap_macro_expansion_map_p (map0)
&& linemap_macro_expansion_map_p (map1)
&& (map0 != map1))
{
if (MAP_START_LOCATION (map0) < MAP_START_LOCATION (map1))
{
l0 = linemap_macro_map_loc_to_exp_point (linemap_check_macro (map0),
l0);
map0 = linemap_lookup (set, l0);
}
else
{
l1 = linemap_macro_map_loc_to_exp_point (linemap_check_macro (map1),
l1);
map1 = linemap_lookup (set, l1);
}
}
if (map0 == map1)
{
*loc0 = l0;
*loc1 = l1;
return map0;
}
return NULL;
}
/* Given two virtual locations LOC0 and LOC1, return the first common
macro map in their macro expansion histories. Return NULL if no
common macro was found. *RES_LOC0 (resp. *RES_LOC1) is set to the
virtual location of the token inside the resulting macro, upon
return of a non-NULL result. */
static const struct line_map*
first_map_in_common (struct line_maps *set,
source_location loc0,
source_location loc1,
source_location *res_loc0,
source_location *res_loc1)
{
*res_loc0 = loc0;
*res_loc1 = loc1;
return first_map_in_common_1 (set, res_loc0, res_loc1);
}
/* Return a positive value if PRE denotes the location of a token that
comes before the token of POST, 0 if PRE denotes the location of
the same token as the token for POST, and a negative value
otherwise. */
int
linemap_compare_locations (struct line_maps *set,
source_location pre,
source_location post)
{
bool pre_virtual_p, post_virtual_p;
source_location l0 = pre, l1 = post;
if (IS_ADHOC_LOC (l0))
l0 = get_location_from_adhoc_loc (set, l0);
if (IS_ADHOC_LOC (l1))
l1 = get_location_from_adhoc_loc (set, l1);
if (l0 == l1)
return 0;
if ((pre_virtual_p = linemap_location_from_macro_expansion_p (set, l0)))
l0 = linemap_resolve_location (set, l0,
LRK_MACRO_EXPANSION_POINT,
NULL);
if ((post_virtual_p = linemap_location_from_macro_expansion_p (set, l1)))
l1 = linemap_resolve_location (set, l1,
LRK_MACRO_EXPANSION_POINT,
NULL);
if (l0 == l1
&& pre_virtual_p
&& post_virtual_p)
{
/* So pre and post represent two tokens that are present in a
same macro expansion. Let's see if the token for pre was
before the token for post in that expansion. */
unsigned i0, i1;
const struct line_map *map =
first_map_in_common (set, pre, post, &l0, &l1);
if (map == NULL)
/* This should not be possible. */
abort ();
i0 = l0 - MAP_START_LOCATION (map);
i1 = l1 - MAP_START_LOCATION (map);
return i1 - i0;
}
if (IS_ADHOC_LOC (l0))
l0 = get_location_from_adhoc_loc (set, l0);
if (IS_ADHOC_LOC (l1))
l1 = get_location_from_adhoc_loc (set, l1);
return l1 - l0;
}
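/* Usage sketch (illustrative only; SET stands for the client's
   struct line_maps instance).  A diagnostic routine that needs to know
   whether token A precedes token B can write:
     if (linemap_compare_locations (set, loc_a, loc_b) > 0)
       ... loc_a comes before loc_b ...
   and the answer stays meaningful even when both locations come from
   the same macro expansion, because the first common macro map is found
   and the token indices inside it are compared. */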
/* Print an include trace, for e.g. the -H option of the preprocessor. */
static void
trace_include (const struct line_maps *set, const line_map_ordinary *map)
{
unsigned int i = set->depth;
while (--i)
putc ('.', stderr);
fprintf (stderr, " %s\n", ORDINARY_MAP_FILE_NAME (map));
}
/* Return the spelling location of the token wherever it comes from,
whether part of a macro definition or not.
This is a subroutine for linemap_resolve_location. */
static source_location
linemap_macro_loc_to_spelling_point (struct line_maps *set,
source_location location,
const line_map_ordinary **original_map)
{
struct line_map *map;
linemap_assert (set && location >= RESERVED_LOCATION_COUNT);
while (true)
{
map = const_cast <line_map *> (linemap_lookup (set, location));
if (!linemap_macro_expansion_map_p (map))
break;
location
= linemap_macro_map_loc_unwind_toward_spelling
(set, linemap_check_macro (map),
location);
}
if (original_map)
*original_map = linemap_check_ordinary (map);
return location;
}
/* If LOCATION is the source location of a token that belongs to a
macro replacement-list -- as part of a macro expansion -- then
return the location of the token at the definition point of the
macro. Otherwise, return LOCATION. SET is the set of maps
LOCATION comes from. ORIGINAL_MAP is an output parm. If non-NULL,
the function sets *ORIGINAL_MAP to the ordinary (non-macro) map the
returned location comes from.
This is a subroutine of linemap_resolve_location. */
static source_location
linemap_macro_loc_to_def_point (struct line_maps *set,
source_location location,
const line_map_ordinary **original_map)
{
struct line_map *map;
if (IS_ADHOC_LOC (location))
location = set->location_adhoc_data_map.data[location
& MAX_SOURCE_LOCATION].locus;
linemap_assert (set && location >= RESERVED_LOCATION_COUNT);
while (true)
{
map = const_cast <line_map *> (linemap_lookup (set, location));
if (!linemap_macro_expansion_map_p (map))
break;
location =
linemap_macro_map_loc_to_def_point (linemap_check_macro (map),
location);
}
if (original_map)
*original_map = linemap_check_ordinary (map);
return location;
}
/* If LOCATION is the source location of a token that belongs to a
macro replacement-list -- at a macro expansion point -- then return
the location of the topmost expansion point of the macro. We say
topmost because if we are in the context of a nested macro
expansion, the function returns the source location of the first
macro expansion that triggered the nested expansions.
Otherwise, return LOCATION. SET is the set of maps LOCATION comes
from. ORIGINAL_MAP is an output parm. If non-NULL, the function
sets *ORIGINAL_MAP to the ordinary (non-macro) map the returned
location comes from.
This is a subroutine of linemap_resolve_location. */
static source_location
linemap_macro_loc_to_exp_point (struct line_maps *set,
source_location location,
const line_map_ordinary **original_map)
{
struct line_map *map;
if (IS_ADHOC_LOC (location))
location = set->location_adhoc_data_map.data[location
& MAX_SOURCE_LOCATION].locus;
linemap_assert (set && location >= RESERVED_LOCATION_COUNT);
while (true)
{
map = const_cast <line_map *> (linemap_lookup (set, location));
if (!linemap_macro_expansion_map_p (map))
break;
location = linemap_macro_map_loc_to_exp_point (linemap_check_macro (map),
location);
}
if (original_map)
*original_map = linemap_check_ordinary (map);
return location;
}
/* Resolve a virtual location into either a spelling location, an
expansion point location or a token argument replacement point
location. Return the map that encodes the virtual location as well
as the resolved location.
If LOC is *NOT* the location of a token resulting from the
expansion of a macro, then the parameter LRK (which stands for
Location Resolution Kind) is ignored and the resulting location
just equals the one given in argument.
Now if LOC *IS* the location of a token resulting from the
expansion of a macro, this is what happens.
* If LRK is set to LRK_MACRO_EXPANSION_POINT
-------------------------------
The virtual location is resolved to the first macro expansion point
that led to this macro expansion.
* If LRK is set to LRK_SPELLING_LOCATION
-------------------------------------
The virtual location is resolved to the locus where the token has
been spelled in the source. This can follow through all the macro
expansions that led to the token.
* If LRK is set to LRK_MACRO_DEFINITION_LOCATION
--------------------------------------
The virtual location is resolved to the locus of the token in the
context of the macro definition.
If LOC is the locus of a token that is an argument of a
function-like macro [replacing a parameter in the replacement list
of the macro] the virtual location is resolved to the locus of the
parameter that is replaced, in the context of the definition of the
macro.
If LOC is the locus of a token that is not an argument of a
function-like macro, then the function behaves as if LRK was set to
LRK_SPELLING_LOCATION.
If MAP is not NULL, *MAP is set to the map encoding the
returned location. Note that if the returned location wasn't originally
encoded by a map, then *MAP is set to NULL. This can happen if LOC
resolves to a location reserved for the client code, like
UNKNOWN_LOCATION or BUILTINS_LOCATION in GCC. */
source_location
linemap_resolve_location (struct line_maps *set,
source_location loc,
enum location_resolution_kind lrk,
const line_map_ordinary **map)
{
source_location locus = loc;
if (IS_ADHOC_LOC (loc))
locus = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
if (locus < RESERVED_LOCATION_COUNT)
{
/* A reserved location wasn't encoded in a map. Let's return a
NULL map here, just like what linemap_ordinary_map_lookup
does. */
if (map)
*map = NULL;
return loc;
}
switch (lrk)
{
case LRK_MACRO_EXPANSION_POINT:
loc = linemap_macro_loc_to_exp_point (set, loc, map);
break;
case LRK_SPELLING_LOCATION:
loc = linemap_macro_loc_to_spelling_point (set, loc, map);
break;
case LRK_MACRO_DEFINITION_LOCATION:
loc = linemap_macro_loc_to_def_point (set, loc, map);
break;
default:
abort ();
}
return loc;
}
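/* Usage sketch (illustrative only).  Given a virtual location LOC of a
   token produced by a macro expansion, the resolution kinds answer
   different questions:
     const line_map_ordinary *om = NULL;
     source_location exp =
       linemap_resolve_location (set, loc, LRK_MACRO_EXPANSION_POINT, &om);
     source_location spell =
       linemap_resolve_location (set, loc, LRK_SPELLING_LOCATION, &om);
   EXP is where the outermost macro was invoked, while SPELL is where the
   token's characters were actually written in the source. */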
/*
Suppose that LOC is the virtual location of a token T coming from
the expansion of a macro M. This function then steps up to get the
location L of the point where M got expanded. If L is a spelling
location inside a macro expansion M', then this function returns
the locus of the point where M' was expanded. Said otherwise, this
function returns the location of T in the context that triggered
the expansion of M.
*LOC_MAP must be set to the map of LOC. This function then sets it
to the map of the returned location. */
source_location
linemap_unwind_toward_expansion (struct line_maps *set,
source_location loc,
const struct line_map **map)
{
source_location resolved_location;
const line_map_macro *macro_map = linemap_check_macro (*map);
const struct line_map *resolved_map;
if (IS_ADHOC_LOC (loc))
loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
resolved_location =
linemap_macro_map_loc_unwind_toward_spelling (set, macro_map, loc);
resolved_map = linemap_lookup (set, resolved_location);
if (!linemap_macro_expansion_map_p (resolved_map))
{
resolved_location = linemap_macro_map_loc_to_exp_point (macro_map, loc);
resolved_map = linemap_lookup (set, resolved_location);
}
*map = resolved_map;
return resolved_location;
}
/* If LOC is the virtual location of a token coming from the expansion
of a macro M and if its spelling location is reserved (e.g, a
location for a built-in token), then this function unwinds (using
linemap_unwind_toward_expansion) the location until a location that
is not reserved and is not in a system header is reached. In other
words, this unwinds the reserved location until a location that is
in real source code is reached.
Otherwise, if the spelling location for LOC is not reserved or if
LOC doesn't come from the expansion of a macro, the function
returns LOC as is and *MAP is not touched.
*MAP is set to the map of the returned location if the latter is
different from LOC. */
source_location
linemap_unwind_to_first_non_reserved_loc (struct line_maps *set,
source_location loc,
const struct line_map **map)
{
source_location resolved_loc;
const struct line_map *map0 = NULL;
const line_map_ordinary *map1 = NULL;
if (IS_ADHOC_LOC (loc))
loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
map0 = linemap_lookup (set, loc);
if (!linemap_macro_expansion_map_p (map0))
return loc;
resolved_loc = linemap_resolve_location (set, loc,
LRK_SPELLING_LOCATION,
&map1);
if (resolved_loc >= RESERVED_LOCATION_COUNT
&& !LINEMAP_SYSP (map1))
return loc;
while (linemap_macro_expansion_map_p (map0)
&& (resolved_loc < RESERVED_LOCATION_COUNT
|| LINEMAP_SYSP (map1)))
{
loc = linemap_unwind_toward_expansion (set, loc, &map0);
resolved_loc = linemap_resolve_location (set, loc,
LRK_SPELLING_LOCATION,
&map1);
}
if (map != NULL)
*map = map0;
return loc;
}
/* Expand source code location LOC and return a user readable source
code location. LOC must be a spelling (non-virtual) location. If
it's a location < RESERVED_LOCATION_COUNT a zeroed expanded source
location is returned. */
expanded_location
linemap_expand_location (struct line_maps *set,
const struct line_map *map,
source_location loc)
{
expanded_location xloc;
memset (&xloc, 0, sizeof (xloc));
if (IS_ADHOC_LOC (loc))
{
xloc.data
= set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].data;
loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
}
if (loc < RESERVED_LOCATION_COUNT)
/* The location for this token wasn't generated from a line map.
It was probably a location for a builtin token, chosen by some
client code. Let's not try to expand the location in that
case. */;
else if (map == NULL)
/* We shouldn't be getting a NULL map with a location that is not
reserved by the client code. */
abort ();
else
{
/* MAP must be an ordinary map and LOC must be non-virtual,
encoded into this map, obviously; the accessors used on MAP
below ensure it is ordinary. Let's just assert the
non-virtualness of LOC here. */
if (linemap_location_from_macro_expansion_p (set, loc))
abort ();
const line_map_ordinary *ord_map = linemap_check_ordinary (map);
xloc.file = LINEMAP_FILE (ord_map);
xloc.line = SOURCE_LINE (ord_map, loc);
xloc.column = SOURCE_COLUMN (ord_map, loc);
xloc.sysp = LINEMAP_SYSP (ord_map) != 0;
}
return xloc;
}
/* Dump line map at index IX in line table SET to STREAM. If STREAM
is NULL, use stderr. IS_MACRO is true if the caller wants to
dump a macro map, false otherwise. */
void
linemap_dump (FILE *stream, struct line_maps *set, unsigned ix, bool is_macro)
{
const char *lc_reasons_v[LC_ENTER_MACRO + 1]
= { "LC_ENTER", "LC_LEAVE", "LC_RENAME", "LC_RENAME_VERBATIM",
"LC_ENTER_MACRO" };
const char *reason;
const line_map *map;
if (stream == NULL)
stream = stderr;
if (!is_macro)
map = LINEMAPS_ORDINARY_MAP_AT (set, ix);
else
map = LINEMAPS_MACRO_MAP_AT (set, ix);
reason = (map->reason <= LC_ENTER_MACRO) ? lc_reasons_v[map->reason] : "???";
fprintf (stream, "Map #%u [%p] - LOC: %u - REASON: %s - SYSP: %s\n",
ix, (void *) map, map->start_location, reason,
((!is_macro
&& ORDINARY_MAP_IN_SYSTEM_HEADER_P (linemap_check_ordinary (map)))
? "yes" : "no"));
if (!is_macro)
{
const line_map_ordinary *ord_map = linemap_check_ordinary (map);
unsigned includer_ix;
const line_map_ordinary *includer_map;
includer_ix = ORDINARY_MAP_INCLUDER_FILE_INDEX (ord_map);
includer_map = includer_ix < LINEMAPS_ORDINARY_USED (set)
? LINEMAPS_ORDINARY_MAP_AT (set, includer_ix)
: NULL;
fprintf (stream, "File: %s:%d\n", ORDINARY_MAP_FILE_NAME (ord_map),
ORDINARY_MAP_STARTING_LINE_NUMBER (ord_map));
fprintf (stream, "Included from: [%d] %s\n", includer_ix,
includer_map ? ORDINARY_MAP_FILE_NAME (includer_map) : "None");
}
else
{
const line_map_macro *macro_map = linemap_check_macro (map);
fprintf (stream, "Macro: %s (%u tokens)\n",
linemap_map_get_macro_name (macro_map),
MACRO_MAP_NUM_MACRO_TOKENS (macro_map));
}
fprintf (stream, "\n");
}
/* Dump debugging information about source location LOC into the file
stream STREAM. SET is the line map set LOC comes from. */
void
linemap_dump_location (struct line_maps *set,
source_location loc,
FILE *stream)
{
const line_map_ordinary *map;
source_location location;
const char *path = "", *from = "";
int l = -1, c = -1, s = -1, e = -1;
if (IS_ADHOC_LOC (loc))
loc = set->location_adhoc_data_map.data[loc & MAX_SOURCE_LOCATION].locus;
if (loc == 0)
return;
location =
linemap_resolve_location (set, loc, LRK_MACRO_DEFINITION_LOCATION, &map);
if (map == NULL)
/* Only reserved locations can be tolerated in this case. */
linemap_assert (location < RESERVED_LOCATION_COUNT);
else
{
path = LINEMAP_FILE (map);
l = SOURCE_LINE (map, location);
c = SOURCE_COLUMN (map, location);
s = LINEMAP_SYSP (map) != 0;
e = location != loc;
if (e)
from = "N/A";
else
from = (INCLUDED_FROM (set, map))
? LINEMAP_FILE (INCLUDED_FROM (set, map))
: "<NULL>";
}
/* P: path, L: line, C: column, S: in-system-header, M: map address,
E: macro expansion?, LOC: original location, R: resolved location */
fprintf (stream, "{P:%s;F:%s;L:%d;C:%d;S:%d;M:%p;E:%d,LOC:%d,R:%d}",
path, from, l, c, s, (void*)map, e, loc, location);
}
/* Return the highest location emitted for a given file for which
there is a line map in SET. FILE_NAME is the file name to
consider. If the function returns TRUE, *LOC is set to the highest
location emitted for that file. */
bool
linemap_get_file_highest_location (struct line_maps *set,
const char *file_name,
source_location *loc)
{
/* If the set is empty or no ordinary map has been created then
there is no file to look for ... */
if (set == NULL || set->info_ordinary.used == 0)
return false;
/* Now look for the last ordinary map created for FILE_NAME. */
int i;
for (i = set->info_ordinary.used - 1; i >= 0; --i)
{
const char *fname = set->info_ordinary.maps[i].to_file;
if (fname && !filename_cmp (fname, file_name))
break;
}
if (i < 0)
return false;
/* The highest location for a given map is either the starting
location of the next map minus one, or -- if the map is the
latest one -- the highest location of the set. */
source_location result;
if (i == (int) set->info_ordinary.used - 1)
result = set->highest_location;
else
result = set->info_ordinary.maps[i + 1].start_location - 1;
*loc = result;
return true;
}
/* Compute and return statistics about the memory consumption of some
parts of the line table SET. */
void
linemap_get_statistics (struct line_maps *set,
struct linemap_stats *s)
{
long ordinary_maps_allocated_size, ordinary_maps_used_size,
macro_maps_allocated_size, macro_maps_used_size,
macro_maps_locations_size = 0, duplicated_macro_maps_locations_size = 0;
const line_map_macro *cur_map;
ordinary_maps_allocated_size =
LINEMAPS_ORDINARY_ALLOCATED (set) * sizeof (struct line_map_ordinary);
ordinary_maps_used_size =
LINEMAPS_ORDINARY_USED (set) * sizeof (struct line_map_ordinary);
macro_maps_allocated_size =
LINEMAPS_MACRO_ALLOCATED (set) * sizeof (struct line_map_macro);
for (cur_map = LINEMAPS_MACRO_MAPS (set);
cur_map && cur_map <= LINEMAPS_LAST_MACRO_MAP (set);
++cur_map)
{
unsigned i;
linemap_assert (linemap_macro_expansion_map_p (cur_map));
macro_maps_locations_size +=
2 * MACRO_MAP_NUM_MACRO_TOKENS (cur_map) * sizeof (source_location);
for (i = 0; i < 2 * MACRO_MAP_NUM_MACRO_TOKENS (cur_map); i += 2)
{
if (MACRO_MAP_LOCATIONS (cur_map)[i] ==
MACRO_MAP_LOCATIONS (cur_map)[i + 1])
duplicated_macro_maps_locations_size +=
sizeof (source_location);
}
}
macro_maps_used_size =
LINEMAPS_MACRO_USED (set) * sizeof (struct line_map_macro);
s->num_ordinary_maps_allocated = LINEMAPS_ORDINARY_ALLOCATED (set);
s->num_ordinary_maps_used = LINEMAPS_ORDINARY_USED (set);
s->ordinary_maps_allocated_size = ordinary_maps_allocated_size;
s->ordinary_maps_used_size = ordinary_maps_used_size;
s->num_expanded_macros = num_expanded_macros_counter;
s->num_macro_tokens = num_macro_tokens_counter;
s->num_macro_maps_used = LINEMAPS_MACRO_USED (set);
s->macro_maps_allocated_size = macro_maps_allocated_size;
s->macro_maps_locations_size = macro_maps_locations_size;
s->macro_maps_used_size = macro_maps_used_size;
s->duplicated_macro_maps_locations_size =
duplicated_macro_maps_locations_size;
s->adhoc_table_size = (set->location_adhoc_data_map.allocated
* sizeof (struct location_adhoc_data));
s->adhoc_table_entries_used = set->location_adhoc_data_map.curr_loc;
}
/* Dump line table SET to STREAM. If STREAM is NULL, stderr is used.
NUM_ORDINARY specifies how many ordinary maps to dump. NUM_MACRO
specifies how many macro maps to dump. */
void
line_table_dump (FILE *stream, struct line_maps *set, unsigned int num_ordinary,
unsigned int num_macro)
{
unsigned int i;
if (set == NULL)
return;
if (stream == NULL)
stream = stderr;
fprintf (stream, "# of ordinary maps: %d\n", LINEMAPS_ORDINARY_USED (set));
fprintf (stream, "# of macro maps: %d\n", LINEMAPS_MACRO_USED (set));
fprintf (stream, "Include stack depth: %d\n", set->depth);
fprintf (stream, "Highest location: %u\n", set->highest_location);
if (num_ordinary)
{
fprintf (stream, "\nOrdinary line maps\n");
for (i = 0; i < num_ordinary && i < LINEMAPS_ORDINARY_USED (set); i++)
linemap_dump (stream, set, i, false);
fprintf (stream, "\n");
}
if (num_macro)
{
fprintf (stream, "\nMacro line maps\n");
for (i = 0; i < num_macro && i < LINEMAPS_MACRO_USED (set); i++)
linemap_dump (stream, set, i, true);
fprintf (stream, "\n");
}
}
/* struct source_range. */
/* Is there any part of this range on the given line? */
bool
source_range::intersects_line_p (const char *file, int line) const
{
expanded_location exploc_start
= linemap_client_expand_location_to_spelling_point (m_start);
if (file != exploc_start.file)
return false;
if (line < exploc_start.line)
return false;
expanded_location exploc_finish
= linemap_client_expand_location_to_spelling_point (m_finish);
if (file != exploc_finish.file)
return false;
if (line > exploc_finish.line)
return false;
return true;
}
/* class rich_location. */
/* Construct a rich_location with location LOC as its initial range. */
rich_location::rich_location (line_maps *set, source_location loc) :
m_loc (loc),
m_num_ranges (0),
m_have_expanded_location (false),
m_num_fixit_hints (0)
{
/* Set up the 0th range, extracting any range from LOC. */
source_range src_range = get_range_from_loc (set, loc);
add_range (src_range, true);
m_ranges[0].m_caret = lazily_expand_location ();
}
/* Construct a rich_location with source_range SRC_RANGE as its
initial range. */
rich_location::rich_location (source_range src_range)
: m_loc (src_range.m_start),
m_num_ranges (0),
m_have_expanded_location (false),
m_num_fixit_hints (0)
{
/* Set up the 0th range: */
add_range (src_range, true);
}
/* The destructor for class rich_location. */
rich_location::~rich_location ()
{
for (unsigned int i = 0; i < m_num_fixit_hints; i++)
delete m_fixit_hints[i];
}
/* Get an expanded_location for this rich_location's primary
location. */
expanded_location
rich_location::lazily_expand_location ()
{
if (!m_have_expanded_location)
{
m_expanded_location
= linemap_client_expand_location_to_spelling_point (m_loc);
m_have_expanded_location = true;
}
return m_expanded_location;
}
/* Set the column of the primary location. This can only be called for
rich_location instances for which the primary location has
caret==start==finish. */
void
rich_location::override_column (int column)
{
lazily_expand_location ();
gcc_assert (m_ranges[0].m_show_caret_p);
gcc_assert (m_ranges[0].m_caret.column == m_expanded_location.column);
gcc_assert (m_ranges[0].m_start.column == m_expanded_location.column);
gcc_assert (m_ranges[0].m_finish.column == m_expanded_location.column);
m_expanded_location.column = column;
m_ranges[0].m_caret.column = column;
m_ranges[0].m_start.column = column;
m_ranges[0].m_finish.column = column;
}
/* Add the given range. */
void
rich_location::add_range (source_location start, source_location finish,
bool show_caret_p)
{
linemap_assert (m_num_ranges < MAX_RANGES);
location_range *range = &m_ranges[m_num_ranges++];
range->m_start = linemap_client_expand_location_to_spelling_point (start);
range->m_finish = linemap_client_expand_location_to_spelling_point (finish);
range->m_caret = range->m_start;
range->m_show_caret_p = show_caret_p;
}
/* Add the given range. */
void
rich_location::add_range (source_range src_range, bool show_caret_p)
{
linemap_assert (m_num_ranges < MAX_RANGES);
add_range (src_range.m_start, src_range.m_finish, show_caret_p);
}
void
rich_location::add_range (location_range *src_range)
{
linemap_assert (m_num_ranges < MAX_RANGES);
m_ranges[m_num_ranges++] = *src_range;
}
/* Add or overwrite the location given by IDX, setting its location to LOC,
and setting its "should my caret be printed" flag to SHOW_CARET_P.
It must either overwrite an existing location, or add one *exactly* on
the end of the array.
This is primarily for use by gcc when implementing diagnostic format
decoders e.g.
- the "+" in the C/C++ frontends, for handling format codes like "%q+D"
(which writes the source location of a tree back into location 0 of
the rich_location), and
- the "%C" and "%L" format codes in the Fortran frontend. */
void
rich_location::set_range (line_maps *set, unsigned int idx,
source_location loc, bool show_caret_p)
{
linemap_assert (idx < MAX_RANGES);
/* We can either overwrite an existing range, or add one exactly
on the end of the array. */
linemap_assert (idx <= m_num_ranges);
source_range src_range = get_range_from_loc (set, loc);
location_range *locrange = &m_ranges[idx];
locrange->m_start
= linemap_client_expand_location_to_spelling_point (src_range.m_start);
locrange->m_finish
= linemap_client_expand_location_to_spelling_point (src_range.m_finish);
locrange->m_show_caret_p = show_caret_p;
locrange->m_caret
= linemap_client_expand_location_to_spelling_point (loc);
/* Are we adding a range onto the end? */
if (idx == m_num_ranges)
m_num_ranges = idx + 1;
if (idx == 0)
{
m_loc = loc;
/* Mark any cached value here as dirty. */
m_have_expanded_location = false;
}
}
/* Add a fixit-hint, suggesting insertion of NEW_CONTENT
at WHERE. */
void
rich_location::add_fixit_insert (source_location where,
const char *new_content)
{
linemap_assert (m_num_fixit_hints < MAX_FIXIT_HINTS);
m_fixit_hints[m_num_fixit_hints++]
= new fixit_insert (where, new_content);
}
/* Add a fixit-hint, suggesting removal of the content at
SRC_RANGE. */
void
rich_location::add_fixit_remove (source_range src_range)
{
linemap_assert (m_num_fixit_hints < MAX_FIXIT_HINTS);
m_fixit_hints[m_num_fixit_hints++] = new fixit_remove (src_range);
}
/* Add a fixit-hint, suggesting replacement of the content at
SRC_RANGE with NEW_CONTENT. */
void
rich_location::add_fixit_replace (source_range src_range,
const char *new_content)
{
linemap_assert (m_num_fixit_hints < MAX_FIXIT_HINTS);
m_fixit_hints[m_num_fixit_hints++]
= new fixit_replace (src_range, new_content);
}
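/* Usage sketch (illustrative only; the identifiers are made up).  A
   diagnostic that wants to underline a token and suggest replacing it
   could build its rich_location like this:
     rich_location richloc (line_table, bogus_token_loc);
     source_range tok_range = get_range_from_loc (line_table,
                                                  bogus_token_loc);
     richloc.add_fixit_replace (tok_range, "corrected_spelling");
   The fix-it hints are owned by the rich_location and released by its
   destructor. */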
/* class fixit_insert. */
fixit_insert::fixit_insert (source_location where,
const char *new_content)
: m_where (where),
m_bytes (xstrdup (new_content)),
m_len (strlen (new_content))
{
}
fixit_insert::~fixit_insert ()
{
free (m_bytes);
}
/* Implementation of fixit_hint::affects_line_p for fixit_insert. */
bool
fixit_insert::affects_line_p (const char *file, int line)
{
expanded_location exploc
= linemap_client_expand_location_to_spelling_point (m_where);
if (file == exploc.file)
if (line == exploc.line)
return true;
return false;
}
/* class fixit_remove. */
fixit_remove::fixit_remove (source_range src_range)
: m_src_range (src_range)
{
}
/* Implementation of fixit_hint::affects_line_p for fixit_remove. */
bool
fixit_remove::affects_line_p (const char *file, int line)
{
return m_src_range.intersects_line_p (file, line);
}
/* class fixit_replace. */
fixit_replace::fixit_replace (source_range src_range,
const char *new_content)
: m_src_range (src_range),
m_bytes (xstrdup (new_content)),
m_len (strlen (new_content))
{
}
fixit_replace::~fixit_replace ()
{
free (m_bytes);
}
/* Implementation of fixit_hint::affects_line_p for fixit_replace. */
bool
fixit_replace::affects_line_p (const char *file, int line)
{
return m_src_range.intersects_line_p (file, line);
}
| h4ck3rm1k3/gcc-1 | libcpp/line-map.c | C | gpl-2.0 | 71,107 |
/*
* libquicktime yuv4 encoder
*
* Copyright (c) 2011 Carl Eugen Hoyos
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
#include "internal.h"
static av_cold int yuv4_encode_init(AVCodecContext *avctx)
{
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
return AVERROR(ENOMEM);
}
return 0;
}
static int yuv4_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pic, int *got_packet)
{
uint8_t *dst;
uint8_t *y, *u, *v;
int i, j, ret;
if ((ret = ff_alloc_packet2(avctx, pkt, 6 * (avctx->width + 1 >> 1) * (avctx->height + 1 >> 1))) < 0)
return ret;
dst = pkt->data;
avctx->coded_frame->key_frame = 1;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
y = pic->data[0];
u = pic->data[1];
v = pic->data[2];
for (i = 0; i < avctx->height + 1 >> 1; i++) {
for (j = 0; j < avctx->width + 1 >> 1; j++) {
*dst++ = u[j] ^ 0x80;
*dst++ = v[j] ^ 0x80;
*dst++ = y[ 2 * j ];
*dst++ = y[ 2 * j + 1];
*dst++ = y[pic->linesize[0] + 2 * j ];
*dst++ = y[pic->linesize[0] + 2 * j + 1];
}
y += 2 * pic->linesize[0];
u += pic->linesize[1];
v += pic->linesize[2];
}
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
}
static av_cold int yuv4_encode_close(AVCodecContext *avctx)
{
av_freep(&avctx->coded_frame);
return 0;
}
AVCodec ff_yuv4_encoder = {
.name = "yuv4",
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed 4:2:0"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_YUV4,
.init = yuv4_encode_init,
.encode2 = yuv4_encode_frame,
.close = yuv4_encode_close,
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
};
| tojo9900/vice | src/lib/libffmpeg/libavcodec/yuv4enc.c | C | gpl-2.0 | 2,758 |
/*
* arch/s390/kernel/sys_s390.c
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Thomas Spatzier (tspat@de.ibm.com)
*
* Derived from "arch/i386/kernel/sys_i386.c"
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/s390
* platform.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/unistd.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
#include "entry.h"
/*
* Perform the mmap() system call. Linux for S/390 isn't able to handle more
* than 5 system call parameters, so this system call uses a memory block
* for parameter passing.
*/
struct s390_mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
{
struct s390_mmap_arg_struct a;
int error = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
return error;
}
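/*
 * Editorial sketch, not part of the upstream kernel source: conceptually,
 * a userspace wrapper on s390 packs all six mmap arguments into one block
 * whose address is the single parameter consumed above, e.g.
 *
 *	struct s390_mmap_arg_struct a = {
 *		.addr = addr, .len = len, .prot = prot,
 *		.flags = flags, .fd = fd, .offset = offset,
 *	};
 *	ret = syscall(__NR_mmap2, &a);
 *
 * The field names mirror the structure defined above; an actual libc may
 * implement this differently.
 */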
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.
*/
SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
unsigned long, third, void __user *, ptr)
{
if (call >> 16)
return -EINVAL;
/* The s390 sys_ipc variant has only five parameters instead of six
* like the generic variant. The only difference is the handling of
* the SEMTIMEDOP subcall where on s390 the third parameter is used
* as a pointer to a struct timespec where the generic variant uses
* the fifth parameter.
* Therefore we can call the generic variant by simply passing the
* third parameter also as fifth parameter.
*/
return sys_ipc(call, first, second, third, ptr, third);
}
#ifdef CONFIG_64BIT
SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
unsigned int ret;
if (current->personality == PER_LINUX32 && personality == PER_LINUX)
personality = PER_LINUX32;
ret = sys_personality(personality);
if (ret == PER_LINUX32)
ret = PER_LINUX;
return ret;
}
#endif /* CONFIG_64BIT */
/*
* Wrapper function for sys_fadvise64/fadvise64_64
*/
#ifndef CONFIG_64BIT
SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
size_t, len, int, advice)
{
return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
len, advice);
}
struct fadvise64_64_args {
int fd;
long long offset;
long long len;
int advice;
};
SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
{
struct fadvise64_64_args a;
if ( copy_from_user(&a, args, sizeof(a)) )
return -EFAULT;
return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
/*
* This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
* 64 bit argument "len" is split into the upper and lower 32 bits. The
* system call wrapper in the user space loads the value to %r6/%r7.
* The code in entry.S keeps the values in %r2 - %r6 where they are and
* stores %r7 to 96(%r15). But the standard C linkage requires that
* the whole 64 bit value for len is stored on the stack and doesn't
* use %r6 at all. So s390_fallocate has to convert the arguments from
* %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
* to
* %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
*/
SYSCALL_DEFINE(s390_fallocate)(int fd, int mode, loff_t offset,
u32 len_high, u32 len_low)
{
return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_s390_fallocate(long fd, long mode, loff_t offset,
long len_high, long len_low)
{
return SYSC_s390_fallocate((int) fd, (int) mode, offset,
(u32) len_high, (u32) len_low);
}
SYSCALL_ALIAS(sys_s390_fallocate, SyS_s390_fallocate);
#endif
#endif
| Jackeagle/android_kernel_sony_c2305 | arch/s390/kernel/sys_s390.c | C | gpl-2.0 | 4,389 |
/*
* GIT - The information manager from hell
*
* Copyright (C) Linus Torvalds, 2005
* Copyright (C) Johannes Schindelin, 2005
*
*/
#include "cache.h"
#include "exec_cmd.h"
#include "strbuf.h"
#include "quote.h"
typedef struct config_file {
struct config_file *prev;
FILE *f;
const char *name;
int linenr;
int eof;
struct strbuf value;
struct strbuf var;
} config_file;
static config_file *cf;
static int zlib_compression_seen;
#define MAX_INCLUDE_DEPTH 10
static const char include_depth_advice[] =
"exceeded maximum include depth (%d) while including\n"
" %s\n"
"from\n"
" %s\n"
"Do you have circular includes?";
static int handle_path_include(const char *path, struct config_include_data *inc)
{
int ret = 0;
struct strbuf buf = STRBUF_INIT;
char *expanded = expand_user_path(path);
if (!expanded)
return error("Could not expand include path '%s'", path);
path = expanded;
/*
* Use an absolute path as-is, but interpret relative paths
* based on the including config file.
*/
if (!is_absolute_path(path)) {
char *slash;
if (!cf || !cf->name)
return error("relative config includes must come from files");
slash = find_last_dir_sep(cf->name);
if (slash)
strbuf_add(&buf, cf->name, slash - cf->name + 1);
strbuf_addstr(&buf, path);
path = buf.buf;
}
if (!access_or_die(path, R_OK)) {
if (++inc->depth > MAX_INCLUDE_DEPTH)
die(include_depth_advice, MAX_INCLUDE_DEPTH, path,
cf && cf->name ? cf->name : "the command line");
ret = git_config_from_file(git_config_include, path, inc);
inc->depth--;
}
strbuf_release(&buf);
free(expanded);
return ret;
}
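/*
 * Editorial sketch, not part of the upstream file: given a repository
 * config file ".git/config" containing
 *
 *	[include]
 *		path = ../shared.gitconfig
 *
 * the code above resolves the relative path against the directory of the
 * including file, i.e. it reads ".git/../shared.gitconfig"; an absolute
 * or "~/"-prefixed path would be used as-is (after expand_user_path()).
 * The file name shown here is hypothetical.
 */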
int git_config_include(const char *var, const char *value, void *data)
{
struct config_include_data *inc = data;
const char *type;
int ret;
/*
* Pass along all values, including "include" directives; this makes it
* possible to query information on the includes themselves.
*/
ret = inc->fn(var, value, inc->data);
if (ret < 0)
return ret;
type = skip_prefix(var, "include.");
if (!type)
return ret;
if (!strcmp(type, "path"))
ret = handle_path_include(value, inc);
return ret;
}
static void lowercase(char *p)
{
for (; *p; p++)
*p = tolower(*p);
}
void git_config_push_parameter(const char *text)
{
struct strbuf env = STRBUF_INIT;
const char *old = getenv(CONFIG_DATA_ENVIRONMENT);
if (old) {
strbuf_addstr(&env, old);
strbuf_addch(&env, ' ');
}
sq_quote_buf(&env, text);
setenv(CONFIG_DATA_ENVIRONMENT, env.buf, 1);
strbuf_release(&env);
}
int git_config_parse_parameter(const char *text,
config_fn_t fn, void *data)
{
struct strbuf **pair;
pair = strbuf_split_str(text, '=', 2);
if (!pair[0])
return error("bogus config parameter: %s", text);
if (pair[0]->len && pair[0]->buf[pair[0]->len - 1] == '=')
strbuf_setlen(pair[0], pair[0]->len - 1);
strbuf_trim(pair[0]);
if (!pair[0]->len) {
strbuf_list_free(pair);
return error("bogus config parameter: %s", text);
}
lowercase(pair[0]->buf);
if (fn(pair[0]->buf, pair[1] ? pair[1]->buf : NULL, data) < 0) {
strbuf_list_free(pair);
return -1;
}
strbuf_list_free(pair);
return 0;
}
int git_config_from_parameters(config_fn_t fn, void *data)
{
const char *env = getenv(CONFIG_DATA_ENVIRONMENT);
char *envw;
const char **argv = NULL;
int nr = 0, alloc = 0;
int i;
if (!env)
return 0;
/* sq_dequote will write over it */
envw = xstrdup(env);
if (sq_dequote_to_argv(envw, &argv, &nr, &alloc) < 0) {
free(envw);
return error("bogus format in " CONFIG_DATA_ENVIRONMENT);
}
for (i = 0; i < nr; i++) {
if (git_config_parse_parameter(argv[i], fn, data) < 0) {
free(argv);
free(envw);
return -1;
}
}
free(argv);
free(envw);
return nr > 0;
}
static int get_next_char(void)
{
int c;
FILE *f;
c = '\n';
if (cf && ((f = cf->f) != NULL)) {
c = fgetc(f);
if (c == '\r') {
/* DOS like systems */
c = fgetc(f);
if (c != '\n') {
ungetc(c, f);
c = '\r';
}
}
if (c == '\n')
cf->linenr++;
if (c == EOF) {
cf->eof = 1;
c = '\n';
}
}
return c;
}
static char *parse_value(void)
{
int quote = 0, comment = 0, space = 0;
strbuf_reset(&cf->value);
for (;;) {
int c = get_next_char();
if (c == '\n') {
if (quote) {
cf->linenr--;
return NULL;
}
return cf->value.buf;
}
if (comment)
continue;
if (isspace(c) && !quote) {
if (cf->value.len)
space++;
continue;
}
if (!quote) {
if (c == ';' || c == '#') {
comment = 1;
continue;
}
}
for (; space; space--)
strbuf_addch(&cf->value, ' ');
if (c == '\\') {
c = get_next_char();
switch (c) {
case '\n':
continue;
case 't':
c = '\t';
break;
case 'b':
c = '\b';
break;
case 'n':
c = '\n';
break;
/* Some characters escape as themselves */
case '\\': case '"':
break;
/* Reject unknown escape sequences */
default:
return NULL;
}
strbuf_addch(&cf->value, c);
continue;
}
if (c == '"') {
quote = 1-quote;
continue;
}
strbuf_addch(&cf->value, c);
}
}
static inline int iskeychar(int c)
{
return isalnum(c) || c == '-';
}
static int get_value(config_fn_t fn, void *data, struct strbuf *name)
{
int c;
char *value;
/* Get the full name */
for (;;) {
c = get_next_char();
if (cf->eof)
break;
if (!iskeychar(c))
break;
strbuf_addch(name, tolower(c));
}
while (c == ' ' || c == '\t')
c = get_next_char();
value = NULL;
if (c != '\n') {
if (c != '=')
return -1;
value = parse_value();
if (!value)
return -1;
}
return fn(name->buf, value, data);
}
static int get_extended_base_var(struct strbuf *name, int c)
{
do {
if (c == '\n')
goto error_incomplete_line;
c = get_next_char();
} while (isspace(c));
/* We require the format to be '[base "extension"]' */
if (c != '"')
return -1;
strbuf_addch(name, '.');
for (;;) {
int c = get_next_char();
if (c == '\n')
goto error_incomplete_line;
if (c == '"')
break;
if (c == '\\') {
c = get_next_char();
if (c == '\n')
goto error_incomplete_line;
}
strbuf_addch(name, c);
}
/* Final ']' */
if (get_next_char() != ']')
return -1;
return 0;
error_incomplete_line:
cf->linenr--;
return -1;
}
static int get_base_var(struct strbuf *name)
{
for (;;) {
int c = get_next_char();
if (cf->eof)
return -1;
if (c == ']')
return 0;
if (isspace(c))
return get_extended_base_var(name, c);
if (!iskeychar(c) && c != '.')
return -1;
strbuf_addch(name, tolower(c));
}
}
static int git_parse_file(config_fn_t fn, void *data)
{
int comment = 0;
int baselen = 0;
struct strbuf *var = &cf->var;
/* U+FEFF Byte Order Mark in UTF8 */
static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf";
const unsigned char *bomptr = utf8_bom;
for (;;) {
int c = get_next_char();
if (bomptr && *bomptr) {
/* We are at the file beginning; skip UTF8-encoded BOM
* if present. Sane editors won't put this in on their
* own, but e.g. Windows Notepad will do it happily. */
if ((unsigned char) c == *bomptr) {
bomptr++;
continue;
} else {
/* Do not tolerate partial BOM. */
if (bomptr != utf8_bom)
break;
/* No BOM at file beginning. Cool. */
bomptr = NULL;
}
}
if (c == '\n') {
if (cf->eof)
return 0;
comment = 0;
continue;
}
if (comment || isspace(c))
continue;
if (c == '#' || c == ';') {
comment = 1;
continue;
}
if (c == '[') {
/* Reset prior to determining a new stem */
strbuf_reset(var);
if (get_base_var(var) < 0 || var->len < 1)
break;
strbuf_addch(var, '.');
baselen = var->len;
continue;
}
if (!isalpha(c))
break;
/*
* Truncate the var name back to the section header
* stem prior to grabbing the suffix part of the name
* and the value.
*/
strbuf_setlen(var, baselen);
strbuf_addch(var, tolower(c));
if (get_value(fn, data, var) < 0)
break;
}
die("bad config file line %d in %s", cf->linenr, cf->name);
}
static int parse_unit_factor(const char *end, uintmax_t *val)
{
if (!*end)
return 1;
else if (!strcasecmp(end, "k")) {
*val *= 1024;
return 1;
}
else if (!strcasecmp(end, "m")) {
*val *= 1024 * 1024;
return 1;
}
else if (!strcasecmp(end, "g")) {
*val *= 1024 * 1024 * 1024;
return 1;
}
return 0;
}
static int git_parse_long(const char *value, long *ret)
{
if (value && *value) {
char *end;
intmax_t val;
uintmax_t uval;
uintmax_t factor = 1;
errno = 0;
val = strtoimax(value, &end, 0);
if (errno == ERANGE)
return 0;
if (!parse_unit_factor(end, &factor))
return 0;
uval = abs(val);
uval *= factor;
if ((uval > maximum_signed_value_of_type(long)) ||
(abs(val) > uval))
return 0;
val *= factor;
*ret = val;
return 1;
}
return 0;
}
int git_parse_ulong(const char *value, unsigned long *ret)
{
if (value && *value) {
char *end;
uintmax_t val;
uintmax_t oldval;
errno = 0;
val = strtoumax(value, &end, 0);
if (errno == ERANGE)
return 0;
oldval = val;
if (!parse_unit_factor(end, &val))
return 0;
if ((val > maximum_unsigned_value_of_type(long)) ||
(oldval > val))
return 0;
*ret = val;
return 1;
}
return 0;
}
static void die_bad_config(const char *name)
{
if (cf && cf->name)
die("bad config value for '%s' in %s", name, cf->name);
die("bad config value for '%s'", name);
}
int git_config_int(const char *name, const char *value)
{
long ret = 0;
if (!git_parse_long(value, &ret))
die_bad_config(name);
return ret;
}
unsigned long git_config_ulong(const char *name, const char *value)
{
unsigned long ret;
if (!git_parse_ulong(value, &ret))
die_bad_config(name);
return ret;
}
static int git_config_maybe_bool_text(const char *name, const char *value)
{
if (!value)
return 1;
if (!*value)
return 0;
if (!strcasecmp(value, "true")
|| !strcasecmp(value, "yes")
|| !strcasecmp(value, "on"))
return 1;
if (!strcasecmp(value, "false")
|| !strcasecmp(value, "no")
|| !strcasecmp(value, "off"))
return 0;
return -1;
}
int git_config_maybe_bool(const char *name, const char *value)
{
long v = git_config_maybe_bool_text(name, value);
if (0 <= v)
return v;
if (git_parse_long(value, &v))
return !!v;
return -1;
}
int git_config_bool_or_int(const char *name, const char *value, int *is_bool)
{
int v = git_config_maybe_bool_text(name, value);
if (0 <= v) {
*is_bool = 1;
return v;
}
*is_bool = 0;
return git_config_int(name, value);
}
int git_config_bool(const char *name, const char *value)
{
int discard;
return !!git_config_bool_or_int(name, value, &discard);
}
int git_config_string(const char **dest, const char *var, const char *value)
{
if (!value)
return config_error_nonbool(var);
*dest = xstrdup(value);
return 0;
}
int git_config_pathname(const char **dest, const char *var, const char *value)
{
if (!value)
return config_error_nonbool(var);
*dest = expand_user_path(value);
if (!*dest)
die("Failed to expand user dir in: '%s'", value);
return 0;
}
static int git_default_core_config(const char *var, const char *value)
{
/* This needs a better name */
if (!strcmp(var, "core.filemode")) {
trust_executable_bit = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.trustctime")) {
trust_ctime = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.statinfo")) {
if (!strcasecmp(value, "default"))
check_stat = 1;
else if (!strcasecmp(value, "minimal"))
check_stat = 0;
}
if (!strcmp(var, "core.quotepath")) {
quote_path_fully = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.symlinks")) {
has_symlinks = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.ignorecase")) {
ignore_case = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.attributesfile"))
return git_config_pathname(&git_attributes_file, var, value);
if (!strcmp(var, "core.bare")) {
is_bare_repository_cfg = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.ignorestat")) {
assume_unchanged = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.prefersymlinkrefs")) {
prefer_symlink_refs = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.logallrefupdates")) {
log_all_ref_updates = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.warnambiguousrefs")) {
warn_ambiguous_refs = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.abbrev")) {
int abbrev = git_config_int(var, value);
if (abbrev < minimum_abbrev || abbrev > 40)
return -1;
default_abbrev = abbrev;
return 0;
}
if (!strcmp(var, "core.loosecompression")) {
int level = git_config_int(var, value);
if (level == -1)
level = Z_DEFAULT_COMPRESSION;
else if (level < 0 || level > Z_BEST_COMPRESSION)
die("bad zlib compression level %d", level);
zlib_compression_level = level;
zlib_compression_seen = 1;
return 0;
}
if (!strcmp(var, "core.compression")) {
int level = git_config_int(var, value);
if (level == -1)
level = Z_DEFAULT_COMPRESSION;
else if (level < 0 || level > Z_BEST_COMPRESSION)
die("bad zlib compression level %d", level);
core_compression_level = level;
core_compression_seen = 1;
if (!zlib_compression_seen)
zlib_compression_level = level;
return 0;
}
if (!strcmp(var, "core.packedgitwindowsize")) {
int pgsz_x2 = getpagesize() * 2;
packed_git_window_size = git_config_ulong(var, value);
/* This value must be multiple of (pagesize * 2) */
packed_git_window_size /= pgsz_x2;
if (packed_git_window_size < 1)
packed_git_window_size = 1;
packed_git_window_size *= pgsz_x2;
return 0;
}
if (!strcmp(var, "core.bigfilethreshold")) {
big_file_threshold = git_config_ulong(var, value);
return 0;
}
if (!strcmp(var, "core.packedgitlimit")) {
packed_git_limit = git_config_ulong(var, value);
return 0;
}
if (!strcmp(var, "core.deltabasecachelimit")) {
delta_base_cache_limit = git_config_ulong(var, value);
return 0;
}
if (!strcmp(var, "core.logpackaccess"))
return git_config_string(&log_pack_access, var, value);
if (!strcmp(var, "core.autocrlf")) {
if (value && !strcasecmp(value, "input")) {
if (core_eol == EOL_CRLF)
return error("core.autocrlf=input conflicts with core.eol=crlf");
auto_crlf = AUTO_CRLF_INPUT;
return 0;
}
auto_crlf = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.safecrlf")) {
if (value && !strcasecmp(value, "warn")) {
safe_crlf = SAFE_CRLF_WARN;
return 0;
}
safe_crlf = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.eol")) {
if (value && !strcasecmp(value, "lf"))
core_eol = EOL_LF;
else if (value && !strcasecmp(value, "crlf"))
core_eol = EOL_CRLF;
else if (value && !strcasecmp(value, "native"))
core_eol = EOL_NATIVE;
else
core_eol = EOL_UNSET;
if (core_eol == EOL_CRLF && auto_crlf == AUTO_CRLF_INPUT)
return error("core.autocrlf=input conflicts with core.eol=crlf");
return 0;
}
if (!strcmp(var, "core.notesref")) {
notes_ref_name = xstrdup(value);
return 0;
}
if (!strcmp(var, "core.pager"))
return git_config_string(&pager_program, var, value);
if (!strcmp(var, "core.editor"))
return git_config_string(&editor_program, var, value);
if (!strcmp(var, "core.commentchar")) {
const char *comment;
int ret = git_config_string(&comment, var, value);
if (!ret)
comment_line_char = comment[0];
return ret;
}
if (!strcmp(var, "core.askpass"))
return git_config_string(&askpass_program, var, value);
if (!strcmp(var, "core.excludesfile"))
return git_config_pathname(&excludes_file, var, value);
if (!strcmp(var, "core.whitespace")) {
if (!value)
return config_error_nonbool(var);
whitespace_rule_cfg = parse_whitespace_rule(value);
return 0;
}
if (!strcmp(var, "core.fsyncobjectfiles")) {
fsync_object_files = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.preloadindex")) {
core_preload_index = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.createobject")) {
if (!strcmp(value, "rename"))
object_creation_mode = OBJECT_CREATION_USES_RENAMES;
else if (!strcmp(value, "link"))
object_creation_mode = OBJECT_CREATION_USES_HARDLINKS;
else
die("Invalid mode for object creation: %s", value);
return 0;
}
if (!strcmp(var, "core.sparsecheckout")) {
core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.precomposeunicode")) {
precomposed_unicode = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "core.hidedotfiles")) {
if (value && !strcasecmp(value, "dotgitonly")) {
hide_dotfiles = HIDE_DOTFILES_DOTGITONLY;
return 0;
}
hide_dotfiles = git_config_bool(var, value);
return 0;
}
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
static int git_default_i18n_config(const char *var, const char *value)
{
if (!strcmp(var, "i18n.commitencoding"))
return git_config_string(&git_commit_encoding, var, value);
if (!strcmp(var, "i18n.logoutputencoding"))
return git_config_string(&git_log_output_encoding, var, value);
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
static int git_default_branch_config(const char *var, const char *value)
{
if (!strcmp(var, "branch.autosetupmerge")) {
if (value && !strcasecmp(value, "always")) {
git_branch_track = BRANCH_TRACK_ALWAYS;
return 0;
}
git_branch_track = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "branch.autosetuprebase")) {
if (!value)
return config_error_nonbool(var);
else if (!strcmp(value, "never"))
autorebase = AUTOREBASE_NEVER;
else if (!strcmp(value, "local"))
autorebase = AUTOREBASE_LOCAL;
else if (!strcmp(value, "remote"))
autorebase = AUTOREBASE_REMOTE;
else if (!strcmp(value, "always"))
autorebase = AUTOREBASE_ALWAYS;
else
return error("Malformed value for %s", var);
return 0;
}
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
static int git_default_push_config(const char *var, const char *value)
{
if (!strcmp(var, "push.default")) {
if (!value)
return config_error_nonbool(var);
else if (!strcmp(value, "nothing"))
push_default = PUSH_DEFAULT_NOTHING;
else if (!strcmp(value, "matching"))
push_default = PUSH_DEFAULT_MATCHING;
else if (!strcmp(value, "simple"))
push_default = PUSH_DEFAULT_SIMPLE;
else if (!strcmp(value, "upstream"))
push_default = PUSH_DEFAULT_UPSTREAM;
else if (!strcmp(value, "tracking")) /* deprecated */
push_default = PUSH_DEFAULT_UPSTREAM;
else if (!strcmp(value, "current"))
push_default = PUSH_DEFAULT_CURRENT;
else {
error("Malformed value for %s: %s", var, value);
return error("Must be one of nothing, matching, simple, "
"upstream or current.");
}
return 0;
}
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
static int git_default_mailmap_config(const char *var, const char *value)
{
if (!strcmp(var, "mailmap.file"))
return git_config_string(&git_mailmap_file, var, value);
if (!strcmp(var, "mailmap.blob"))
return git_config_string(&git_mailmap_blob, var, value);
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
int git_default_config(const char *var, const char *value, void *dummy)
{
if (!prefixcmp(var, "core."))
return git_default_core_config(var, value);
if (!prefixcmp(var, "user."))
return git_ident_config(var, value, dummy);
if (!prefixcmp(var, "i18n."))
return git_default_i18n_config(var, value);
if (!prefixcmp(var, "branch."))
return git_default_branch_config(var, value);
if (!prefixcmp(var, "push."))
return git_default_push_config(var, value);
if (!prefixcmp(var, "mailmap."))
return git_default_mailmap_config(var, value);
if (!prefixcmp(var, "advice."))
return git_default_advice_config(var, value);
if (!strcmp(var, "pager.color") || !strcmp(var, "color.pager")) {
pager_use_color = git_config_bool(var,value);
return 0;
}
if (!strcmp(var, "pack.packsizelimit")) {
pack_size_limit_cfg = git_config_ulong(var, value);
return 0;
}
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
int git_config_from_file(config_fn_t fn, const char *filename, void *data)
{
int ret;
FILE *f = fopen(filename, "r");
ret = -1;
if (f) {
config_file top;
/* push config-file parsing state stack */
top.prev = cf;
top.f = f;
top.name = filename;
top.linenr = 1;
top.eof = 0;
strbuf_init(&top.value, 1024);
strbuf_init(&top.var, 1024);
		cf = &top;
ret = git_parse_file(fn, data);
/* pop config-file parsing state stack */
strbuf_release(&top.value);
strbuf_release(&top.var);
cf = top.prev;
fclose(f);
}
return ret;
}
const char *git_etc_gitconfig(void)
{
static const char *system_wide;
if (!system_wide)
system_wide = system_path(ETC_GITCONFIG);
return system_wide;
}
int git_env_bool(const char *k, int def)
{
const char *v = getenv(k);
return v ? git_config_bool(k, v) : def;
}
int git_config_system(void)
{
return !git_env_bool("GIT_CONFIG_NOSYSTEM", 0);
}
int git_config_early(config_fn_t fn, void *data, const char *repo_config)
{
int ret = 0, found = 0;
char *xdg_config = NULL;
char *user_config = NULL;
home_config_paths(&user_config, &xdg_config, "config");
if (git_config_system() && !access_or_die(git_etc_gitconfig(), R_OK)) {
ret += git_config_from_file(fn, git_etc_gitconfig(),
data);
found += 1;
}
if (xdg_config && !access_or_die(xdg_config, R_OK)) {
ret += git_config_from_file(fn, xdg_config, data);
found += 1;
}
if (user_config && !access_or_die(user_config, R_OK)) {
ret += git_config_from_file(fn, user_config, data);
found += 1;
}
if (repo_config && !access_or_die(repo_config, R_OK)) {
ret += git_config_from_file(fn, repo_config, data);
found += 1;
}
switch (git_config_from_parameters(fn, data)) {
case -1: /* error */
die("unable to parse command-line config");
break;
case 0: /* found nothing */
break;
default: /* found at least one item */
found++;
break;
}
free(xdg_config);
free(user_config);
return ret == 0 ? found : ret;
}
int git_config_with_options(config_fn_t fn, void *data,
const char *filename, int respect_includes)
{
char *repo_config = NULL;
int ret;
struct config_include_data inc = CONFIG_INCLUDE_INIT;
if (respect_includes) {
inc.fn = fn;
inc.data = data;
fn = git_config_include;
data = &inc;
}
/*
* If we have a specific filename, use it. Otherwise, follow the
* regular lookup sequence.
*/
if (filename)
return git_config_from_file(fn, filename, data);
repo_config = git_pathdup("config");
ret = git_config_early(fn, data, repo_config);
if (repo_config)
free(repo_config);
return ret;
}
int git_config(config_fn_t fn, void *data)
{
return git_config_with_options(fn, data, NULL, 1);
}
/*
* Find all the stuff for git_config_set() below.
*/
#define MAX_MATCHES 512
static struct {
int baselen;
char *key;
int do_not_match;
regex_t *value_regex;
int multi_replace;
size_t offset[MAX_MATCHES];
enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state;
int seen;
} store;
static int matches(const char *key, const char *value)
{
return !strcmp(key, store.key) &&
(store.value_regex == NULL ||
(store.do_not_match ^
!regexec(store.value_regex, value, 0, NULL, 0)));
}
static int store_aux(const char *key, const char *value, void *cb)
{
const char *ep;
size_t section_len;
FILE *f = cf->f;
switch (store.state) {
case KEY_SEEN:
if (matches(key, value)) {
if (store.seen == 1 && store.multi_replace == 0) {
warning("%s has multiple values", key);
} else if (store.seen >= MAX_MATCHES) {
error("too many matches for %s", key);
return 1;
}
store.offset[store.seen] = ftell(f);
store.seen++;
}
break;
case SECTION_SEEN:
/*
* What we are looking for is in store.key (both
* section and var), and its section part is baselen
* long. We found key (again, both section and var).
* We would want to know if this key is in the same
* section as what we are looking for. We already
* know we are in the same section as what should
* hold store.key.
*/
ep = strrchr(key, '.');
section_len = ep - key;
if ((section_len != store.baselen) ||
memcmp(key, store.key, section_len+1)) {
store.state = SECTION_END_SEEN;
break;
}
/*
* Do not increment matches: this is no match, but we
* just made sure we are in the desired section.
*/
store.offset[store.seen] = ftell(f);
/* fallthru */
case SECTION_END_SEEN:
case START:
if (matches(key, value)) {
store.offset[store.seen] = ftell(f);
store.state = KEY_SEEN;
store.seen++;
} else {
if (strrchr(key, '.') - key == store.baselen &&
!strncmp(key, store.key, store.baselen)) {
store.state = SECTION_SEEN;
store.offset[store.seen] = ftell(f);
}
}
}
return 0;
}
static int write_error(const char *filename)
{
error("failed to write new configuration file %s", filename);
/* Same error code as "failed to rename". */
return 4;
}
static int store_write_section(int fd, const char *key)
{
const char *dot;
int i, success;
struct strbuf sb = STRBUF_INIT;
dot = memchr(key, '.', store.baselen);
if (dot) {
strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key);
for (i = dot - key + 1; i < store.baselen; i++) {
if (key[i] == '"' || key[i] == '\\')
strbuf_addch(&sb, '\\');
strbuf_addch(&sb, key[i]);
}
strbuf_addstr(&sb, "\"]\n");
} else {
strbuf_addf(&sb, "[%.*s]\n", store.baselen, key);
}
success = write_in_full(fd, sb.buf, sb.len) == sb.len;
strbuf_release(&sb);
return success;
}
static int store_write_pair(int fd, const char *key, const char *value)
{
int i, success;
int length = strlen(key + store.baselen + 1);
const char *quote = "";
struct strbuf sb = STRBUF_INIT;
/*
* Check to see if the value needs to be surrounded with a dq pair.
* Note that problematic characters are always backslash-quoted; this
* check is about not losing leading or trailing SP and strings that
* follow beginning-of-comment characters (i.e. ';' and '#') by the
* configuration parser.
*/
if (value[0] == ' ')
quote = "\"";
for (i = 0; value[i]; i++)
if (value[i] == ';' || value[i] == '#')
quote = "\"";
if (i && value[i - 1] == ' ')
quote = "\"";
strbuf_addf(&sb, "\t%.*s = %s",
length, key + store.baselen + 1, quote);
for (i = 0; value[i]; i++)
switch (value[i]) {
case '\n':
strbuf_addstr(&sb, "\\n");
break;
case '\t':
strbuf_addstr(&sb, "\\t");
break;
case '"':
case '\\':
strbuf_addch(&sb, '\\');
default:
strbuf_addch(&sb, value[i]);
break;
}
strbuf_addf(&sb, "%s\n", quote);
success = write_in_full(fd, sb.buf, sb.len) == sb.len;
strbuf_release(&sb);
return success;
}
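/*
 * Editorial sketch, not part of the upstream file: for the key
 * "core.pager" (store.baselen covering "core") and the hypothetical
 * value " less -R ; extra ", the routine above emits
 *
 *	pager = " less -R ; extra "
 *
 * with a leading tab, quoting the value because of the leading and
 * trailing spaces and the ';', while a plain value such as "less" is
 * written unquoted as "pager = less".
 */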
static ssize_t find_beginning_of_line(const char *contents, size_t size,
size_t offset_, int *found_bracket)
{
size_t equal_offset = size, bracket_offset = size;
ssize_t offset;
contline:
for (offset = offset_-2; offset > 0
&& contents[offset] != '\n'; offset--)
switch (contents[offset]) {
case '=': equal_offset = offset; break;
case ']': bracket_offset = offset; break;
}
if (offset > 0 && contents[offset-1] == '\\') {
offset_ = offset;
goto contline;
}
if (bracket_offset < equal_offset) {
*found_bracket = 1;
offset = bracket_offset+1;
} else
offset++;
return offset;
}
int git_config_set_in_file(const char *config_filename,
const char *key, const char *value)
{
return git_config_set_multivar_in_file(config_filename, key, value, NULL, 0);
}
int git_config_set(const char *key, const char *value)
{
return git_config_set_multivar(key, value, NULL, 0);
}
/*
* Auxiliary function to sanity-check and split the key into the section
* identifier and variable name.
*
* Returns 0 on success, -1 when there is an invalid character in the key and
* -2 if there is no section name in the key.
*
* store_key - pointer to char* which will hold a copy of the key with
* lowercase section and variable name
* baselen - pointer to int which will hold the length of the
* section + subsection part, can be NULL
*/
int git_config_parse_key(const char *key, char **store_key, int *baselen_)
{
int i, dot, baselen;
const char *last_dot = strrchr(key, '.');
/*
* Since "key" actually contains the section name and the real
* key name separated by a dot, we have to know where the dot is.
*/
if (last_dot == NULL || last_dot == key) {
error("key does not contain a section: %s", key);
return -CONFIG_NO_SECTION_OR_NAME;
}
if (!last_dot[1]) {
error("key does not contain variable name: %s", key);
return -CONFIG_NO_SECTION_OR_NAME;
}
baselen = last_dot - key;
if (baselen_)
*baselen_ = baselen;
/*
* Validate the key and while at it, lower case it for matching.
*/
*store_key = xmalloc(strlen(key) + 1);
dot = 0;
for (i = 0; key[i]; i++) {
unsigned char c = key[i];
if (c == '.')
dot = 1;
/* Leave the extended basename untouched.. */
if (!dot || i > baselen) {
if (!iskeychar(c) ||
(i == baselen + 1 && !isalpha(c))) {
error("invalid key: %s", key);
goto out_free_ret_1;
}
c = tolower(c);
} else if (c == '\n') {
error("invalid key (newline): %s", key);
goto out_free_ret_1;
}
(*store_key)[i] = c;
}
(*store_key)[i] = 0;
return 0;
out_free_ret_1:
free(*store_key);
*store_key = NULL;
return -CONFIG_INVALID_KEY;
}
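/*
 * Editorial sketch, not part of the upstream file: for the hypothetical
 * input key "Remote.Origin.insteadOf" the routine above stores
 * "remote.Origin.insteadof" (section and variable name lowercased, the
 * subsection left untouched) and sets *baselen_ to 13, the length of
 * "remote.Origin"; a key without a dot, or one ending in a dot, fails
 * with -CONFIG_NO_SECTION_OR_NAME.
 */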
/*
* If value==NULL, unset in (remove from) config,
* if value_regex!=NULL, disregard key/value pairs where value does not match.
* if multi_replace==0, nothing, or only one matching key/value is replaced,
* else all matching key/values (regardless how many) are removed,
* before the new pair is written.
*
* Returns 0 on success.
*
* This function does this:
*
* - it locks the config file by creating ".git/config.lock"
*
* - it then parses the config using store_aux() as validator to find
* the position on the key/value pair to replace. If it is to be unset,
* it must be found exactly once.
*
* - the config file is mmap()ed and the part before the match (if any) is
* written to the lock file, then the changed part and the rest.
*
* - the config file is removed and the lock file rename()d to it.
*
*/
int git_config_set_multivar_in_file(const char *config_filename,
const char *key, const char *value,
const char *value_regex, int multi_replace)
{
int fd = -1, in_fd;
int ret;
struct lock_file *lock = NULL;
char *filename_buf = NULL;
/* parse-key returns negative; flip the sign to feed exit(3) */
ret = 0 - git_config_parse_key(key, &store.key, &store.baselen);
if (ret)
goto out_free;
store.multi_replace = multi_replace;
if (!config_filename)
config_filename = filename_buf = git_pathdup("config");
/*
* The lock serves a purpose in addition to locking: the new
* contents of .git/config will be written into it.
*/
lock = xcalloc(sizeof(struct lock_file), 1);
fd = hold_lock_file_for_update(lock, config_filename, 0);
if (fd < 0) {
error("could not lock config file %s: %s", config_filename, strerror(errno));
free(store.key);
ret = CONFIG_NO_LOCK;
goto out_free;
}
/*
* If .git/config does not exist yet, write a minimal version.
*/
in_fd = open(config_filename, O_RDONLY);
if ( in_fd < 0 ) {
free(store.key);
if ( ENOENT != errno ) {
error("opening %s: %s", config_filename,
strerror(errno));
ret = CONFIG_INVALID_FILE; /* same as "invalid config file" */
goto out_free;
}
/* if nothing to unset, error out */
if (value == NULL) {
ret = CONFIG_NOTHING_SET;
goto out_free;
}
store.key = (char *)key;
if (!store_write_section(fd, key) ||
!store_write_pair(fd, key, value))
goto write_err_out;
} else {
struct stat st;
char *contents;
size_t contents_sz, copy_begin, copy_end;
int i, new_line = 0;
if (value_regex == NULL)
store.value_regex = NULL;
else {
if (value_regex[0] == '!') {
store.do_not_match = 1;
value_regex++;
} else
store.do_not_match = 0;
store.value_regex = (regex_t*)xmalloc(sizeof(regex_t));
if (regcomp(store.value_regex, value_regex,
REG_EXTENDED)) {
error("invalid pattern: %s", value_regex);
free(store.value_regex);
ret = CONFIG_INVALID_PATTERN;
goto out_free;
}
}
store.offset[0] = 0;
store.state = START;
store.seen = 0;
/*
* After this, store.offset will contain the *end* offset
* of the last match, or remain at 0 if no match was found.
* As a side effect, we make sure to transform only a valid
* existing config file.
*/
if (git_config_from_file(store_aux, config_filename, NULL)) {
error("invalid config file %s", config_filename);
free(store.key);
if (store.value_regex != NULL) {
regfree(store.value_regex);
free(store.value_regex);
}
ret = CONFIG_INVALID_FILE;
goto out_free;
}
free(store.key);
if (store.value_regex != NULL) {
regfree(store.value_regex);
free(store.value_regex);
}
/* if nothing to unset, or too many matches, error out */
if ((store.seen == 0 && value == NULL) ||
(store.seen > 1 && multi_replace == 0)) {
ret = CONFIG_NOTHING_SET;
goto out_free;
}
fstat(in_fd, &st);
contents_sz = xsize_t(st.st_size);
contents = xmmap(NULL, contents_sz, PROT_READ,
MAP_PRIVATE, in_fd, 0);
close(in_fd);
if (store.seen == 0)
store.seen = 1;
for (i = 0, copy_begin = 0; i < store.seen; i++) {
if (store.offset[i] == 0) {
store.offset[i] = copy_end = contents_sz;
} else if (store.state != KEY_SEEN) {
copy_end = store.offset[i];
} else
copy_end = find_beginning_of_line(
contents, contents_sz,
store.offset[i]-2, &new_line);
if (copy_end > 0 && contents[copy_end-1] != '\n')
new_line = 1;
/* write the first part of the config */
if (copy_end > copy_begin) {
if (write_in_full(fd, contents + copy_begin,
copy_end - copy_begin) <
copy_end - copy_begin)
goto write_err_out;
if (new_line &&
write_str_in_full(fd, "\n") != 1)
goto write_err_out;
}
copy_begin = store.offset[i];
}
/* write the pair (value == NULL means unset) */
if (value != NULL) {
if (store.state == START) {
if (!store_write_section(fd, key))
goto write_err_out;
}
if (!store_write_pair(fd, key, value))
goto write_err_out;
}
/* write the rest of the config */
if (copy_begin < contents_sz)
if (write_in_full(fd, contents + copy_begin,
contents_sz - copy_begin) <
contents_sz - copy_begin)
goto write_err_out;
munmap(contents, contents_sz);
}
if (commit_lock_file(lock) < 0) {
error("could not commit config file %s", config_filename);
ret = CONFIG_NO_WRITE;
goto out_free;
}
/*
* lock is committed, so don't try to roll it back below.
* NOTE: Since lockfile.c keeps a linked list of all created
* lock_file structures, it isn't safe to free(lock). It's
* better to just leave it hanging around.
*/
lock = NULL;
ret = 0;
out_free:
if (lock)
rollback_lock_file(lock);
free(filename_buf);
return ret;
write_err_out:
ret = write_error(lock->filename);
goto out_free;
}
int git_config_set_multivar(const char *key, const char *value,
const char *value_regex, int multi_replace)
{
return git_config_set_multivar_in_file(NULL, key, value, value_regex,
multi_replace);
}
static int section_name_match (const char *buf, const char *name)
{
int i = 0, j = 0, dot = 0;
if (buf[i] != '[')
return 0;
for (i = 1; buf[i] && buf[i] != ']'; i++) {
if (!dot && isspace(buf[i])) {
dot = 1;
if (name[j++] != '.')
break;
for (i++; isspace(buf[i]); i++)
; /* do nothing */
if (buf[i] != '"')
break;
continue;
}
if (buf[i] == '\\' && dot)
i++;
else if (buf[i] == '"' && dot) {
for (i++; isspace(buf[i]); i++)
; /* do_nothing */
break;
}
if (buf[i] != name[j++])
break;
}
if (buf[i] == ']' && name[j] == 0) {
/*
* We match, now just find the right length offset by
* gobbling up any whitespace after it, as well
*/
i++;
for (; buf[i] && isspace(buf[i]); i++)
; /* do nothing */
return i;
}
return 0;
}
static int section_name_is_ok(const char *name)
{
/* Empty section names are bogus. */
if (!*name)
return 0;
/*
* Before a dot, we must be alphanumeric or dash. After the first dot,
* anything goes, so we can stop checking.
*/
for (; *name && *name != '.'; name++)
if (*name != '-' && !isalnum(*name))
return 0;
return 1;
}
/* if new_name == NULL, the section is removed instead */
int git_config_rename_section_in_file(const char *config_filename,
const char *old_name, const char *new_name)
{
int ret = 0, remove = 0;
char *filename_buf = NULL;
struct lock_file *lock;
int out_fd;
char buf[1024];
FILE *config_file;
if (new_name && !section_name_is_ok(new_name)) {
ret = error("invalid section name: %s", new_name);
goto out;
}
if (!config_filename)
config_filename = filename_buf = git_pathdup("config");
lock = xcalloc(sizeof(struct lock_file), 1);
out_fd = hold_lock_file_for_update(lock, config_filename, 0);
if (out_fd < 0) {
ret = error("could not lock config file %s", config_filename);
goto out;
}
if (!(config_file = fopen(config_filename, "rb"))) {
/* no config file means nothing to rename, no error */
goto unlock_and_out;
}
while (fgets(buf, sizeof(buf), config_file)) {
int i;
int length;
char *output = buf;
for (i = 0; buf[i] && isspace(buf[i]); i++)
; /* do nothing */
if (buf[i] == '[') {
/* it's a section */
int offset = section_name_match(&buf[i], old_name);
if (offset > 0) {
ret++;
if (new_name == NULL) {
remove = 1;
continue;
}
store.baselen = strlen(new_name);
if (!store_write_section(out_fd, new_name)) {
ret = write_error(lock->filename);
goto out;
}
/*
* We wrote out the new section, with
* a newline, now skip the old
* section's length
*/
output += offset + i;
if (strlen(output) > 0) {
/*
* More content means there's
* a declaration to put on the
* next line; indent with a
* tab
*/
output -= 1;
output[0] = '\t';
}
}
remove = 0;
}
if (remove)
continue;
length = strlen(output);
if (write_in_full(out_fd, output, length) != length) {
ret = write_error(lock->filename);
goto out;
}
}
fclose(config_file);
unlock_and_out:
if (commit_lock_file(lock) < 0)
ret = error("could not commit config file %s", config_filename);
out:
free(filename_buf);
return ret;
}
int git_config_rename_section(const char *old_name, const char *new_name)
{
return git_config_rename_section_in_file(NULL, old_name, new_name);
}
/*
* Call this to report error for your variable that should not
* get a boolean value (i.e. "[my] var" means "true").
*/
#undef config_error_nonbool
int config_error_nonbool(const char *var)
{
return error("Missing value for '%s'", var);
}
int parse_config_key(const char *var,
const char *section,
const char **subsection, int *subsection_len,
const char **key)
{
int section_len = strlen(section);
const char *dot;
/* Does it start with "section." ? */
if (prefixcmp(var, section) || var[section_len] != '.')
return -1;
/*
* Find the key; we don't know yet if we have a subsection, but we must
* parse backwards from the end, since the subsection may have dots in
* it, too.
*/
dot = strrchr(var, '.');
*key = dot + 1;
/* Did we have a subsection at all? */
if (dot == var + section_len) {
*subsection = NULL;
*subsection_len = 0;
}
else {
*subsection = var + section_len + 1;
*subsection_len = dot - *subsection;
}
return 0;
}
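/*
 * Editorial sketch, not part of the upstream file: for the hypothetical
 * variable "url.git://example.com/.insteadOf" checked against section
 * "url", the routine above returns subsection "git://example.com/"
 * (subsection_len 18) and key "insteadOf"; for "core.bare" against
 * "core" the subsection comes back NULL with length 0.
 */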
| Devindik/origin | config.c | C | gpl-2.0 | 40,138 |
/* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754sp.h"
/* close to ieee754sp_logb
*/
ieee754sp ieee754sp_frexp(ieee754sp x, int *eptr)
{
COMPXSP;
CLEARCX;
EXPLODEXSP;
switch (xc) {
case IEEE754_CLASS_SNAN:
case IEEE754_CLASS_QNAN:
case IEEE754_CLASS_INF:
case IEEE754_CLASS_ZERO:
*eptr = 0;
return x;
case IEEE754_CLASS_DNORM:
SPDNORMX;
break;
case IEEE754_CLASS_NORM:
break;
}
*eptr = xe + 1;
return buildsp(xs, -1 + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
}
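/*
 * Editorial note, not part of the upstream file: like the C library
 * frexp(), this splits x into m * 2^e with 0.5 <= |m| < 1; e.g. for
 * x == 8.0 the function returns 0.5 and stores 4 in *eptr, since
 * 0.5 * 2^4 == 8.0.  Zero, infinities and NaNs are passed through with
 * *eptr set to 0, and denormals are normalized first (SPDNORMX).
 */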
| evolver56k/xpenology | arch/mips/math-emu/sp_frexp.c | C | gpl-2.0 | 1,405 |
/* LibTomCrypt, modular cryptographic library -- Tom St Denis
*
* LibTomCrypt is a library that provides various cryptographic
* algorithms in a highly modular and flexible manner.
*
* The library is free for all purposes without any express
* guarantee it works.
*/
#include "tomcrypt_private.h"
/**
@file rsa_import.c
Import a PKCS RSA key, Tom St Denis
*/
#ifdef LTC_MRSA
/**
Import an RSAPublicKey or RSAPrivateKey [two-prime only, only support >= 1024-bit keys, defined in PKCS #1 v2.1]
@param in The packet to import from
  @param inlen              Its length (octets)
@param key [out] Destination for newly imported key
@return CRYPT_OK if successful, upon error allocated memory is freed
*/
int rsa_import(const unsigned char *in, unsigned long inlen, rsa_key *key)
{
int err;
void *zero;
unsigned char *tmpbuf=NULL;
unsigned long tmpbuf_len, len;
LTC_ARGCHK(in != NULL);
LTC_ARGCHK(key != NULL);
LTC_ARGCHK(ltc_mp.name != NULL);
/* init key */
if ((err = mp_init_multi(&key->e, &key->d, &key->N, &key->dQ,
&key->dP, &key->qP, &key->p, &key->q, NULL)) != CRYPT_OK) {
return err;
}
/* see if the OpenSSL DER format RSA public key will work */
tmpbuf_len = inlen;
tmpbuf = XCALLOC(1, tmpbuf_len);
if (tmpbuf == NULL) {
err = CRYPT_MEM;
goto LBL_ERR;
}
len = 0;
err = x509_decode_subject_public_key_info(in, inlen,
PKA_RSA, tmpbuf, &tmpbuf_len,
LTC_ASN1_NULL, NULL, &len);
if (err == CRYPT_OK) { /* SubjectPublicKeyInfo format */
/* now it should be SEQUENCE { INTEGER, INTEGER } */
if ((err = der_decode_sequence_multi(tmpbuf, tmpbuf_len,
LTC_ASN1_INTEGER, 1UL, key->N,
LTC_ASN1_INTEGER, 1UL, key->e,
LTC_ASN1_EOL, 0UL, NULL)) != CRYPT_OK) {
goto LBL_ERR;
}
key->type = PK_PUBLIC;
err = CRYPT_OK;
goto LBL_FREE;
}
/* not SSL public key, try to match against PKCS #1 standards */
err = der_decode_sequence_multi(in, inlen, LTC_ASN1_INTEGER, 1UL, key->N,
LTC_ASN1_EOL, 0UL, NULL);
if (err != CRYPT_OK && err != CRYPT_INPUT_TOO_LONG) {
goto LBL_ERR;
}
if (mp_cmp_d(key->N, 0) == LTC_MP_EQ) {
if ((err = mp_init(&zero)) != CRYPT_OK) {
goto LBL_ERR;
}
/* it's a private key */
if ((err = der_decode_sequence_multi(in, inlen,
LTC_ASN1_INTEGER, 1UL, zero,
LTC_ASN1_INTEGER, 1UL, key->N,
LTC_ASN1_INTEGER, 1UL, key->e,
LTC_ASN1_INTEGER, 1UL, key->d,
LTC_ASN1_INTEGER, 1UL, key->p,
LTC_ASN1_INTEGER, 1UL, key->q,
LTC_ASN1_INTEGER, 1UL, key->dP,
LTC_ASN1_INTEGER, 1UL, key->dQ,
LTC_ASN1_INTEGER, 1UL, key->qP,
LTC_ASN1_EOL, 0UL, NULL)) != CRYPT_OK) {
mp_clear(zero);
goto LBL_ERR;
}
mp_clear(zero);
key->type = PK_PRIVATE;
} else if (mp_cmp_d(key->N, 1) == LTC_MP_EQ) {
/* we don't support multi-prime RSA */
err = CRYPT_PK_INVALID_TYPE;
goto LBL_ERR;
} else {
/* it's a public key and we lack e */
if ((err = der_decode_sequence_multi(in, inlen,
LTC_ASN1_INTEGER, 1UL, key->N,
LTC_ASN1_INTEGER, 1UL, key->e,
LTC_ASN1_EOL, 0UL, NULL)) != CRYPT_OK) {
goto LBL_ERR;
}
key->type = PK_PUBLIC;
}
err = CRYPT_OK;
goto LBL_FREE;
LBL_ERR:
mp_clear_multi(key->d, key->e, key->N, key->dQ, key->dP, key->qP, key->p, key->q, NULL);
LBL_FREE:
if (tmpbuf != NULL) {
XFREE(tmpbuf);
}
return err;
}
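/*
 * Editorial sketch, not part of the upstream file (hypothetical buffer
 * names, error handling abbreviated): a typical caller imports a DER
 * encoded key and releases it with rsa_free() when done:
 *
 *    rsa_key key;
 *    if (rsa_import(der_buf, der_len, &key) == CRYPT_OK) {
 *       ... key.type is PK_PUBLIC or PK_PRIVATE ...
 *       rsa_free(&key);
 *    }
 */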
#endif /* LTC_MRSA */
/* ref: $Format:%D$ */
/* git commit: $Format:%H$ */
/* commit time: $Format:%ai$ */
| mangosthree/server | dep/tomlib/Crypt/src/pk/rsa/rsa_import.c | C | gpl-2.0 | 4,169 |
/*
Lucy the Diamond Girl - Game where player collects diamonds.
Copyright (C) 2005-2015 Joni Yrjänä <joniyrjana@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Complete license can be found in the LICENSE file.
*/
#include "widget.h"
#include <assert.h>
void widget_set_navigation_right(struct widget * widget, struct widget * right)
{
assert(widget != NULL);
assert(right != NULL);
assert(widget != right);
assert(widget->navigation_right_ == NULL);
widget->navigation_right_ = right;
stack_push(widget->widgets_linking_to_this, right);
stack_push(right->widgets_linking_to_this, widget);
}
| Peanhua/diamond-girl | src/widget_set_navigation_right.c | C | gpl-2.0 | 1,294 |