@@ -60,14 +60,23 @@ sched_ue::sched_ue() : dl_next_alloc(NULL), ul_next_alloc(NULL), has_pucch(false
   bzero(&dl_harq, sizeof(dl_harq));
   bzero(&ul_harq, sizeof(ul_harq));
   bzero(&dl_ant_info, sizeof(dl_ant_info));
+  pthread_mutex_init(&mutex, NULL);
   reset();
 }
+sched_ue::~sched_ue() {
+  pthread_mutex_lock(&mutex);
+  pthread_mutex_unlock(&mutex);
+  pthread_mutex_destroy(&mutex);
+}
 void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_interface::cell_cfg_t *cell_cfg,
                        srslte_regs_t *regs, srslte::log *log_h_)
 {
   reset();
+  pthread_mutex_lock(&mutex);
   rnti = rnti_;
   log_h = log_h_;
   memcpy(&cell, &cell_cfg->cell, sizeof(srslte_cell_t));
@@ -81,10 +90,6 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
   }
   Info("SCHED: Added user rnti=0x%x\n", rnti);
-  for (int i = 0; i < sched_interface::MAX_LC; i++) {
-    set_bearer_cfg(i, &cfg.ue_bearers[i]);
-  }
   // Config HARQ processes
   for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
     dl_harq[i].config(i, cfg.maxharq_tx, log_h);
@@ -97,10 +102,17 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
       sched::generate_cce_location(regs, &dci_locations[cfi][sf_idx], cfi + 1, sf_idx, rnti);
     }
   }
+  pthread_mutex_unlock(&mutex);
+  for (int i = 0; i < sched_interface::MAX_LC; i++) {
+    set_bearer_cfg(i, &cfg.ue_bearers[i]);
+  }
 }
 void sched_ue::reset()
 {
+  pthread_mutex_lock(&mutex);
   bzero(&cfg, sizeof(sched_interface::ue_cfg_t));
   sr = false;
   next_tpc_pusch = 1;
@@ -123,17 +135,22 @@ void sched_ue::reset()
       ul_harq[i].reset(tb);
     }
   }
+  pthread_mutex_unlock(&mutex);
   for (int i = 0; i < sched_interface::MAX_LC; i++) {
     rem_bearer(i);
   }
 }
 void sched_ue::set_fixed_mcs(int mcs_ul, int mcs_dl) {
+  pthread_mutex_lock(&mutex);
   fixed_mcs_ul = mcs_ul;
   fixed_mcs_dl = mcs_dl;
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
+  pthread_mutex_lock(&mutex);
   if (mcs_ul < 0) {
     max_mcs_ul = 28;
   } else {
@@ -144,6 +161,7 @@ void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
   } else {
     max_mcs_dl = mcs_dl;
   }
+  pthread_mutex_unlock(&mutex);
 }
@@ -155,6 +173,7 @@ void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
 void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg)
 {
+  pthread_mutex_lock(&mutex);
   if (lc_id < sched_interface::MAX_LC) {
     memcpy(&lch[lc_id].cfg, cfg, sizeof(sched_interface::ue_bearer_cfg_t));
     lch[lc_id].buf_tx = 0;
@@ -163,13 +182,16 @@ void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t*
       Info("SCHED: Set bearer config lc_id=%d, direction=%d\n", lc_id, (int) lch[lc_id].cfg.direction);
     }
   }
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::rem_bearer(uint32_t lc_id)
 {
+  pthread_mutex_lock(&mutex);
   if (lc_id < sched_interface::MAX_LC) {
     bzero(&lch[lc_id], sizeof(ue_bearer_t));
   }
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
@@ -180,6 +202,7 @@ void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
 void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
 {
+  pthread_mutex_lock(&mutex);
   if (lc_id < sched_interface::MAX_LC) {
     if (set_value) {
       lch[lc_id].bsr = bsr;
@@ -189,25 +212,30 @@ void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
   }
   Debug("SCHED: bsr=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", bsr, lc_id,
         lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::ul_phr(int phr)
 {
   power_headroom = phr;
 }
 void sched_ue::dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
 {
+  pthread_mutex_lock(&mutex);
   if (lc_id < sched_interface::MAX_LC) {
     lch[lc_id].buf_retx = retx_queue;
     lch[lc_id].buf_tx = tx_queue;
     Debug("SCHED: DL lcid=%d buffer_state=%d,%d\n", lc_id, tx_queue, retx_queue);
   }
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::mac_buffer_state(uint32_t ce_code)
 {
+  pthread_mutex_lock(&mutex);
   buf_mac++;
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::set_sr()
@@ -222,42 +250,59 @@ void sched_ue::unset_sr()
 bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
 {
+  bool ret = false;
+  pthread_mutex_lock(&mutex);
+  uint32_t n_pucch_sr, n_pucch_nosr;
+  srslte_pucch_sched_t pucch_sched;
+  bool has_sr;
   if (!phy_config_dedicated_enabled) {
-    return false;
+    goto unlock;
   }
-  srslte_pucch_sched_t pucch_sched;
   pucch_sched.sps_enabled = false;
   pucch_sched.n_pucch_sr = cfg.sr_N_pucch;
   pucch_sched.n_pucch_2 = cfg.n_pucch_cqi;
   pucch_sched.N_pucch_1 = cfg.pucch_cfg.n1_pucch_an;
-  bool has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
+  has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
   if (!has_sr) {
-    return false;
+    goto unlock;
   }
-  uint32_t n_pucch_sr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, true, &pucch_sched);
-  uint32_t n_pucch_nosr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, false, &pucch_sched);
+  n_pucch_sr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, true, &pucch_sched);
+  n_pucch_nosr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, false, &pucch_sched);
   if (srslte_pucch_n_prb(&cfg.pucch_cfg, SRSLTE_PUCCH_FORMAT_1A, n_pucch_sr, cell.nof_prb, cell.cp, 0) ==
       srslte_pucch_n_prb(&cfg.pucch_cfg, SRSLTE_PUCCH_FORMAT_1A, n_pucch_nosr, cell.nof_prb, cell.cp, 0))
   {
-    return true;
+    ret = true;
   } else {
-    return false;
+    ret = false;
   }
+unlock:
+  pthread_mutex_unlock(&mutex);
+  return ret;
 }
 bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
 {
+  bool ret = false;
+  bool has_sr;
+  pthread_mutex_lock(&mutex);
   if (!phy_config_dedicated_enabled) {
-    return false;
+    goto unlock;
   }
   srslte_pucch_sched_t pucch_sched;
   pucch_sched.sps_enabled = false;
   pucch_sched.n_pucch_sr = cfg.sr_N_pucch;
   pucch_sched.n_pucch_2 = cfg.n_pucch_cqi;
   pucch_sched.N_pucch_1 = cfg.pucch_cfg.n1_pucch_an;
-  bool has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
+  has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
   // First check if it has pending ACKs
   for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
@@ -270,7 +315,8 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
         Debug("SCHED: Reserved Format1A PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d, ncce=%d, has_sr=%d, n_pucch_1=%d\n",
               rnti, prb_idx[0], prb_idx[1], n_pucch, dl_harq[i].get_n_cce(), has_sr, pucch_sched.N_pucch_1);
       }
-      return true;
+      ret = true;
+      goto unlock;
     }
   }
   // If there is no Format1A/B, then check if it's expecting Format1
@@ -281,7 +327,8 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
      }
    }
    Debug("SCHED: Reserved Format1 PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d\n", rnti, prb_idx[0], prb_idx[1], cfg.sr_N_pucch);
-   return true;
+   ret = true;
+   goto unlock;
  }
  // Finally check Format2 (periodic CQI)
  if (cfg.cqi_enabled && srslte_cqi_send(cfg.cqi_idx, current_tti)) {
@@ -292,27 +339,41 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
      Debug("SCHED: Reserved Format2 PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d, pmi_idx=%d\n",
            rnti, prb_idx[0], prb_idx[1], cfg.cqi_pucch, cfg.cqi_idx);
    }
-   return true;
+   ret = true;
+   goto unlock;
  }
-  return false;
+  ret = false;
+unlock:
+  pthread_mutex_unlock(&mutex);
+  return ret;
 }
 int sched_ue::set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack)
 {
+  pthread_mutex_lock(&mutex);
+  int ret = -1;
   for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
     if (TTI_TX(dl_harq[i].get_tti()) == tti) {
       Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d.%d, tti=%d\n", ack, rnti, i, tb_idx, tti);
       dl_harq[i].set_ack(tb_idx, ack);
-      return dl_harq[i].get_tbs(tb_idx);
+      ret = dl_harq[i].get_tbs(tb_idx);
+      goto unlock;
     }
   }
   Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti);
-  return -1;
+  ret = -1;
unlock:
+  pthread_mutex_unlock(&mutex);
+  return ret;
 }
 void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
 {
+  pthread_mutex_lock(&mutex);
   // Remove PDCP header??
   if (len > 4) {
     len -= 4;
@@ -328,54 +389,72 @@ void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
   }
   Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", len, lcid,
         lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::set_ul_crc(uint32_t tti, bool crc_res)
 {
+  pthread_mutex_lock(&mutex);
   get_ul_harq(tti)->set_ack(0, crc_res);
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::set_dl_ri(uint32_t tti, uint32_t ri)
 {
+  pthread_mutex_lock(&mutex);
   dl_ri = ri;
   dl_ri_tti = tti;
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::set_dl_pmi(uint32_t tti, uint32_t pmi)
 {
+  pthread_mutex_lock(&mutex);
   dl_pmi = pmi;
   dl_pmi_tti = tti;
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cqi)
 {
+  pthread_mutex_lock(&mutex);
   dl_cqi = cqi;
   dl_cqi_tti = tti;
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::set_dl_ant_info(LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *d)
 {
+  pthread_mutex_lock(&mutex);
   memcpy(&dl_ant_info, d, sizeof(LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT));
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code)
 {
+  pthread_mutex_lock(&mutex);
   ul_cqi = cqi;
   ul_cqi_tti = tti;
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::tpc_inc() {
+  pthread_mutex_lock(&mutex);
   if (power_headroom > 0) {
     next_tpc_pusch = 3;
     next_tpc_pucch = 3;
   }
   log_h->info("SCHED: Set TCP=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
+  pthread_mutex_unlock(&mutex);
 }
 void sched_ue::tpc_dec() {
+  pthread_mutex_lock(&mutex);
   next_tpc_pusch = 0;
   next_tpc_pucch = 0;
   log_h->info("SCHED: Set TCP=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
+  pthread_mutex_unlock(&mutex);
 }
 /*******************************************************
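The setters and getters in the hunks above all take the new per-UE mutex on entry and release it on every exit path; the functions that used to return from several places (pucch_sr_collision(), get_pucch_sched(), set_ack_info()) are reshaped around a single unlock: label. A minimal, self-contained sketch of that single-exit idiom, using hypothetical names rather than srsLTE code:

    #include <pthread.h>

    // Hypothetical class for illustration only -- not part of the patch.
    class guarded_counter {
    public:
      guarded_counter() : value(0) { pthread_mutex_init(&mutex, NULL); }
      ~guarded_counter() { pthread_mutex_destroy(&mutex); }

      // Same single-exit idiom as sched_ue::set_ack_info(): lock on entry,
      // jump to one unlock label instead of returning from several places.
      int consume(int max_items) {
        int ret = -1;
        pthread_mutex_lock(&mutex);
        if (max_items <= 0) {
          goto unlock; // early exit still releases the mutex
        }
        if (value == 0) {
          ret = 0;
          goto unlock;
        }
        ret = (value < max_items) ? value : max_items;
        value -= ret;
      unlock:
        pthread_mutex_unlock(&mutex);
        return ret;
      }

    private:
      pthread_mutex_t mutex;
      int value;
    };

A C++11 std::lock_guard would make the gotos unnecessary, but the patch keeps the pthread style already used throughout the file.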
@@ -391,6 +470,8 @@ int sched_ue::generate_format1(dl_harq_proc *h,
                                uint32_t tti,
                                uint32_t cfi)
 {
+  pthread_mutex_lock(&mutex);
   srslte_ra_dl_dci_t *dci = &data->dci;
   bzero(dci, sizeof(srslte_ra_dl_dci_t));
@@ -410,7 +491,7 @@ int sched_ue::generate_format1(dl_harq_proc *h,
   }
   if (h->is_empty(0)) {
-    uint32_t req_bytes = get_pending_dl_new_data(tti);
+    uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
     uint32_t nof_prb = format1_count_prb(h->get_rbgmask(), cell.nof_prb);
     srslte_ra_dl_grant_t grant;
@@ -463,6 +544,7 @@ int sched_ue::generate_format1(dl_harq_proc *h,
     dci->tb_en[0] = true;
     dci->tb_en[1] = false;
   }
+  pthread_mutex_unlock(&mutex);
   return tbs;
 }
@@ -471,8 +553,21 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
                                 sched_interface::dl_sched_data_t *data,
                                 uint32_t tti,
                                 uint32_t cfi)
 {
+  pthread_mutex_lock(&mutex);
+  int ret = generate_format2a_unlocked(h, data, tti, cfi);
+  pthread_mutex_unlock(&mutex);
+  return ret;
+}
+// Generates a Format2a grant
+int sched_ue::generate_format2a_unlocked(dl_harq_proc *h,
+                                         sched_interface::dl_sched_data_t *data,
+                                         uint32_t tti,
+                                         uint32_t cfi)
+{
   bool tb_en[SRSLTE_MAX_TB] = {false};
   srslte_ra_dl_dci_t *dci = &data->dci;
   bzero(dci, sizeof(srslte_ra_dl_dci_t));
@@ -512,7 +607,7 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
   }
   for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
-    uint32_t req_bytes = get_pending_dl_new_data(tti);
+    uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
     int mcs = 0;
     int tbs = 0;
@@ -566,17 +661,22 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
   dci->tpc_pucch = (uint8_t) next_tpc_pucch;
   next_tpc_pucch = 1;
-  return data->tbs[0] + data->tbs[1];
+  int ret = data->tbs[0] + data->tbs[1];
+  return ret;
 }
 // Generates a Format2 grant
 int sched_ue::generate_format2(dl_harq_proc *h,
                                sched_interface::dl_sched_data_t *data,
                                uint32_t tti,
                                uint32_t cfi)
 {
+  pthread_mutex_lock(&mutex);
   /* Call Format 2a (common) */
-  int ret = generate_format2a(h, data, tti, cfi);
+  int ret = generate_format2a_unlocked(h, data, tti, cfi);
   /* Compute precoding information */
   if (SRSLTE_RA_DL_GRANT_NOF_TB(&data->dci) == 1) {
@@ -585,6 +685,8 @@ int sched_ue::generate_format2(dl_harq_proc *h,
     data->dci.pinfo = (uint8_t) (dl_pmi & 1);
   }
+  pthread_mutex_unlock(&mutex);
   return ret;
 }
@@ -594,6 +696,8 @@ int sched_ue::generate_format0(ul_harq_proc *h,
                               uint32_t tti,
                               bool cqi_request)
 {
+  pthread_mutex_lock(&mutex);
   srslte_ra_ul_dci_t *dci = &data->dci;
   bzero(dci, sizeof(srslte_ra_ul_dci_t));
@@ -608,7 +712,7 @@ int sched_ue::generate_format0(ul_harq_proc *h,
     h->new_tx(tti, mcs, tbs);
   } else if (h->is_empty(0)) {
-    uint32_t req_bytes = get_pending_ul_new_data(tti);
+    uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
     uint32_t N_srs = 0;
     uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * allocation.L * SRSLTE_NRE;
@@ -646,6 +750,8 @@ int sched_ue::generate_format0(ul_harq_proc *h,
     next_tpc_pusch = 1;
   }
+  pthread_mutex_unlock(&mutex);
   return tbs;
 }
@@ -678,11 +784,20 @@
 }
 bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
 {
+  pthread_mutex_lock(&mutex);
+  bool ret = needs_cqi_unlocked(tti, will_be_sent);
+  pthread_mutex_unlock(&mutex);
+  return ret;
+}
+// Private lock-free implementation
+bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
+{
   bool ret = false;
   if (phy_config_dedicated_enabled &&
       cfg.aperiodic_cqi_period &&
-      get_pending_dl_new_data(tti) > 0)
+      get_pending_dl_new_data_unlocked(tti) > 0)
   {
     uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti);
     bool needscqi = interval >= cfg.aperiodic_cqi_period;
@@ -702,12 +817,9 @@ bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
 uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
 {
-  uint32_t pending_data = 0;
-  for (int i = 0; i < sched_interface::MAX_LC; i++) {
-    if (bearer_is_dl(&lch[i])) {
-      pending_data += lch[i].buf_retx + lch[i].buf_tx;
-    }
-  }
+  pthread_mutex_lock(&mutex);
+  uint32_t pending_data = get_pending_dl_new_data_unlocked(tti);
+  pthread_mutex_unlock(&mutex);
   return pending_data;
 }
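This hunk shows the second recurring shape in the patch: the public getter only locks, delegates to a private *_unlocked sibling, and unlocks, so that methods which already hold the mutex (generate_format1() and generate_format2a_unlocked() above) can reuse the logic without deadlocking on the non-recursive pthread mutex. A reduced sketch under those assumptions, with a hypothetical class that is not srsLTE code:

    #include <pthread.h>
    #include <stdint.h>
    #include <strings.h>

    // Hypothetical class for illustration only -- not part of the patch.
    class buffer_tracker {
    public:
      buffer_tracker() {
        pthread_mutex_init(&mutex, NULL);
        bzero(buf_tx, sizeof(buf_tx));
        bzero(buf_retx, sizeof(buf_retx));
      }
      ~buffer_tracker() { pthread_mutex_destroy(&mutex); }

      // Public API: lock, delegate, unlock.
      uint32_t get_pending_data() {
        pthread_mutex_lock(&mutex);
        uint32_t pending = get_pending_data_unlocked();
        pthread_mutex_unlock(&mutex);
        return pending;
      }

      // A second locked method reuses the _unlocked variant; calling the
      // public getter here instead would deadlock on the non-recursive mutex.
      uint32_t get_pending_data_total() {
        pthread_mutex_lock(&mutex);
        uint32_t total = get_pending_data_unlocked();
        if (total > 0) {
          total += (total < 128) ? 2 : 3; // header overhead, as in the patch
        }
        pthread_mutex_unlock(&mutex);
        return total;
      }

    private:
      // Private lock-free implementation: the caller must hold the mutex.
      uint32_t get_pending_data_unlocked() {
        uint32_t pending = 0;
        for (int i = 0; i < 4; i++) {
          pending += buf_tx[i] + buf_retx[i];
        }
        return pending;
      }

      pthread_mutex_t mutex;
      uint32_t buf_tx[4];
      uint32_t buf_retx[4];
    };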
@@ -717,17 +829,48 @@ uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
 /// \return number of bytes to be allocated
 uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
 {
-  uint32_t req_bytes = get_pending_dl_new_data(tti);
+  pthread_mutex_lock(&mutex);
+  uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
   if (req_bytes > 0) {
     req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
     if (is_first_dl_tx()) {
       req_bytes += 6; // count for RAR
     }
   }
+  pthread_mutex_unlock(&mutex);
   return req_bytes;
 }
+// Private lock-free implementation
+uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
+{
+  uint32_t pending_data = 0;
+  for (int i = 0; i < sched_interface::MAX_LC; i++) {
+    if (bearer_is_dl(&lch[i])) {
+      pending_data += lch[i].buf_retx + lch[i].buf_tx;
+    }
+  }
+  return pending_data;
+}
 uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
 {
+  pthread_mutex_lock(&mutex);
+  uint32_t pending_data = get_pending_ul_new_data_unlocked(tti);
+  pthread_mutex_unlock(&mutex);
+  return pending_data;
+}
+uint32_t sched_ue::get_pending_ul_old_data()
+{
+  pthread_mutex_lock(&mutex);
+  uint32_t pending_data = get_pending_ul_old_data_unlocked();
+  pthread_mutex_unlock(&mutex);
+  return pending_data;
+}
+// Private lock-free implementation
+uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
+{
   uint32_t pending_data = 0;
   for (int i = 0; i < sched_interface::MAX_LC; i++) {
@@ -738,10 +881,10 @@ uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
   if (!pending_data && is_sr_triggered()) {
     return 512;
   }
-  if (!pending_data && needs_cqi(tti)) {
+  if (!pending_data && needs_cqi_unlocked(tti)) {
     return 128;
   }
-  uint32_t pending_ul_data = get_pending_ul_old_data();
+  uint32_t pending_ul_data = get_pending_ul_old_data_unlocked();
   if (pending_data > pending_ul_data) {
     pending_data -= pending_ul_data;
   } else {
@@ -749,12 +892,13 @@ uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
   }
   if (pending_data) {
     Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n", pending_data, pending_ul_data,
          lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
   }
   return pending_data;
 }
-uint32_t sched_ue::get_pending_ul_old_data()
+// Private lock-free implementation
+uint32_t sched_ue::get_pending_ul_old_data_unlocked()
 {
   uint32_t pending_data = 0;
   for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
@@ -775,6 +919,8 @@ uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
 uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
 {
+  pthread_mutex_lock(&mutex);
   int mcs = 0;
   uint32_t nof_re = 0;
   int tbs = 0;
@@ -791,10 +937,12 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
     if (tbs > 0) {
       nbytes = tbs;
     } else if (tbs < 0) {
+      pthread_mutex_unlock(&mutex);
       return 0;
     }
   }
+  pthread_mutex_unlock(&mutex);
   return n;
 }
@@ -810,6 +958,8 @@ uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
     return 0;
   }
+  pthread_mutex_lock(&mutex);
   for (n = 1; n < cell.nof_prb && nbytes < req_bytes + 4; n++) {
     uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * n * SRSLTE_NRE;
     int tbs = 0;
@@ -827,6 +977,8 @@ uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
     n++;
   }
+  pthread_mutex_unlock(&mutex);
   return n;
 }
@@ -839,6 +991,9 @@ bool sched_ue::is_sr_triggered()
 dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
 {
 #if ASYNC_DL_SCHED
+  pthread_mutex_lock(&mutex);
   int oldest_idx = -1;
   uint32_t oldest_tti = 0;
   for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
@@ -850,11 +1005,15 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
       }
     }
   }
+  dl_harq_proc *h = NULL;
   if (oldest_idx >= 0) {
-    return &dl_harq[oldest_idx];
-  } else {
-    return NULL;
+    h = &dl_harq[oldest_idx];
   }
+  pthread_mutex_unlock(&mutex);
+  return h;
 #else
   return &dl_harq[tti % SCHED_MAX_HARQ_PROC];
 #endif
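The rework above (and the same change to get_empty_dl_harq() in the next hunk) converts search loops that used to return a pointer from inside the loop into loops that record the hit in a local and stop via the loop condition, so execution always reaches the single unlock before returning. A minimal sketch of that transformation, with hypothetical types that are not srsLTE code:

    #include <pthread.h>
    #include <stddef.h>

    struct proc { bool empty; }; // stand-in for dl_harq_proc (hypothetical)

    // Returning &procs[i] from inside the loop would skip the unlock; instead
    // the hit is kept in a local pointer and the loop stops via "&& !h".
    proc* find_empty(proc* procs, int n, pthread_mutex_t* mutex) {
      pthread_mutex_lock(mutex);
      proc* h = NULL;
      for (int i = 0; i < n && !h; i++) {
        if (procs[i].empty) {
          h = &procs[i];
        }
      }
      pthread_mutex_unlock(mutex);
      return h;
    }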
@@ -862,12 +1021,16 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
 dl_harq_proc* sched_ue::get_empty_dl_harq()
 {
-  for (int i = 0; i < SCHED_MAX_HARQ_PROC; i++) {
+  pthread_mutex_lock(&mutex);
+  dl_harq_proc *h = NULL;
+  for (int i = 0; i < SCHED_MAX_HARQ_PROC && !h; i++) {
     if (dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1)) {
-      return &dl_harq[i];
+      h = &dl_harq[i];
     }
   }
-  return NULL;
+  pthread_mutex_unlock(&mutex);
+  return h;
 }
 ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti)
@@ -908,6 +1071,7 @@ srslte_dci_format_t sched_ue::get_dci_format() {
 /* Find lowest DCI aggregation level supported by the UE spectral efficiency */
 uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
 {
+  pthread_mutex_lock(&mutex);
   uint32_t l = 0;
   float max_coderate = srslte_cqi_to_coderate(dl_cqi);
   float coderate = 99;
@@ -922,6 +1086,7 @@ uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
     l++;
   } while (l < l_max && factor * coderate > max_coderate);
   Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n", dl_cqi, l, nof_bits, coderate, max_coderate);
+  pthread_mutex_unlock(&mutex);
   return l;
 }
@@ -1029,9 +1194,6 @@ int sched_ue::alloc_tbs(uint32_t nof_prb,
   uint32_t max_Qm = is_ul ? 4 : 6; // Allow 16-QAM in PUSCH Only
   // TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
-  if (has_pucch && is_ul) {
-    cqi -= 3;
-  }
   int tbs = cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, &sel_mcs) / 8;